diff --git a/.github/example-run/testbed_ui.ron b/.github/example-run/testbed_ui.ron index 579f791d66..3e2b22dd98 100644 --- a/.github/example-run/testbed_ui.ron +++ b/.github/example-run/testbed_ui.ron @@ -1,6 +1,4 @@ ( events: [ - (100, Screenshot), - (200, AppExit), ] ) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 82446ac5b4..c1e5575a52 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -36,11 +36,3 @@ println!("My super cool code."); ``` - -## Migration Guide - -> This section is optional. If there are no breaking changes, you can delete this section. - -- If this PR is a breaking change (relative to the last release of Bevy), describe how a user might need to migrate their code to support these changes -- Simply adding new functionality is not a breaking change. -- Fixing behavior that was definitely a bug, rather than a questionable design choice is not a breaking change. diff --git a/.github/workflows/action-on-PR-labeled.yml b/.github/workflows/action-on-PR-labeled.yml index 9887494a48..9e5835c1f7 100644 --- a/.github/workflows/action-on-PR-labeled.yml +++ b/.github/workflows/action-on-PR-labeled.yml @@ -12,19 +12,63 @@ permissions: pull-requests: 'write' jobs: - comment-on-breaking-change-label: + comment-on-migration-guide-label: runs-on: ubuntu-latest - if: github.event.label.name == 'M-Needs-Migration-Guide' && !contains(github.event.pull_request.body, '## Migration Guide') + if: github.event.label.name == 'M-Needs-Migration-Guide' steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 2 + - name: Get changes + id: get_changes + shell: bash {0} + run: | + git fetch --depth=1 origin $BASE_SHA + git diff --exit-code $BASE_SHA $HEAD_SHA -- ./release-content/migration-guides + echo "found_changes=$?" >> $GITHUB_OUTPUT + env: + BASE_SHA: ${{ github.event.pull_request.base.sha }} + HEAD_SHA: ${{ github.event.pull_request.head.sha }} - uses: actions/github-script@v7 + if: steps.get_changes.outputs.found_changes == '0' with: script: | await github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `It looks like your PR is a breaking change, but you didn't provide a migration guide. + body: `It looks like your PR is a breaking change, but **you didn't provide a migration guide**. - Could you add some context on what users should update when this change get released in a new version of Bevy? - It will be used to help writing the migration guide for the version. Putting it after a \`## Migration Guide\` will help it get automatically picked up by our tooling.` + Please review the [instructions for writing migration guides](https://github.com/bevyengine/bevy/tree/main/release-content/migration_guides.md), then expand or revise the content in the [migration guides directory](https://github.com/bevyengine/bevy/tree/main/release-content/migration-guides) to reflect your changes.` + }) + comment-on-release-note-label: + runs-on: ubuntu-latest + if: github.event.label.name == 'M-Needs-Release-Note' + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 2 + - name: Get changes + id: get_changes + shell: bash {0} + run: | + git fetch --depth=1 origin $BASE_SHA + git diff --exit-code $BASE_SHA $HEAD_SHA -- ./release-content/release-notes + echo "found_changes=$?" 
>> $GITHUB_OUTPUT + env: + BASE_SHA: ${{ github.event.pull_request.base.sha }} + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + - uses: actions/github-script@v7 + if: steps.get_changes.outputs.found_changes == '0' + with: + script: | + await github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `It looks like your PR has been selected for a highlight in the next release blog post, but **you didn't provide a release note**. + + Please review the [instructions for writing release notes](https://github.com/bevyengine/bevy/tree/main/release-content/release_notes.md), then expand or revise the content in the [release notes directory](https://github.com/bevyengine/bevy/tree/main/release-content/release_notes) to showcase your changes.` }) diff --git a/.github/workflows/ci-comment-failures.yml b/.github/workflows/ci-comment-failures.yml index d926390993..f1fb5a54be 100644 --- a/.github/workflows/ci-comment-failures.yml +++ b/.github/workflows/ci-comment-failures.yml @@ -48,8 +48,21 @@ jobs: return "true" - run: unzip missing-examples.zip if: ${{ steps.find-artifact.outputs.result == 'true' }} - - name: 'Comment on PR' + - name: "Check if last comment is already from actions" if: ${{ steps.find-artifact.outputs.result == 'true' }} + id: check-last-comment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR=`cat ./NR` + if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Comment on PR" + if: ${{ steps.find-artifact.outputs.result == 'true' && steps.check-last-comment.outputs.result == 'false' }} uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -106,8 +119,21 @@ jobs: return "true" - run: unzip missing-features.zip if: ${{ steps.find-artifact.outputs.result == 'true' }} - - name: 'Comment on PR' + - name: "Check if last comment is already from actions" if: ${{ steps.find-artifact.outputs.result == 'true' }} + id: check-last-comment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR=`cat ./NR` + if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Comment on PR" + if: ${{ steps.find-artifact.outputs.result == 'true' && steps.check-last-comment.outputs.result == 'false' }} uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -164,8 +190,21 @@ jobs: return "true" - run: unzip msrv.zip if: ${{ steps.find-artifact.outputs.result == 'true' }} - - name: 'Comment on PR' + - name: "Check if last comment is already from actions" if: ${{ steps.find-artifact.outputs.result == 'true' }} + id: check-last-comment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR=`cat ./NR` + if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Comment on PR" + if: ${{ steps.find-artifact.outputs.result == 'true' && steps.check-last-comment.outputs.result == 'false' }} uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -178,64 +217,3 @@ jobs: issue_number: issue_number, body: 'Your PR increases Bevy Minimum Supported Rust Version. 
Please update the `rust-version` field in the root Cargo.toml file.' }); - - make-macos-screenshots-available: - runs-on: ubuntu-latest - timeout-minutes: 30 - outputs: - branch-name: ${{ steps.branch-name.outputs.result }} - steps: - - name: 'Download artifact' - id: find-artifact - uses: actions/github-script@v7 - with: - result-encoding: string - script: | - var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: ${{github.event.workflow_run.id }}, - }); - var matchArtifacts = artifacts.data.artifacts.filter((artifact) => { - return artifact.name == "screenshots-macos" - }); - if (matchArtifacts.length == 0) { return "false" } - var matchArtifact = matchArtifacts[0]; - var download = await github.rest.actions.downloadArtifact({ - owner: context.repo.owner, - repo: context.repo.repo, - artifact_id: matchArtifact.id, - archive_format: 'zip', - }); - var fs = require('fs'); - fs.writeFileSync('${{github.workspace}}/screenshots-macos.zip', Buffer.from(download.data)); - return "true" - - name: prepare artifact folder - run: | - unzip screenshots-macos.zip - mkdir screenshots - mv screenshots-* screenshots/ - - name: save screenshots - uses: actions/upload-artifact@v4 - with: - name: screenshots-macos - path: screenshots - - name: branch name - id: branch-name - run: | - if [ -f PR ]; then - echo "result=PR-$(cat PR)-${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT - else - echo "result=${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT - fi - - compare-macos-screenshots: - name: Compare macOS screenshots - needs: [make-macos-screenshots-available] - uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml - with: - commit: ${{ github.event.workflow_run.head_sha }} - branch: ${{ needs.make-macos-screenshots-available.outputs.branch-name }} - artifact: screenshots-macos - os: macos - secrets: inherit diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a7769bc92b..f57f403115 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,7 +5,6 @@ on: pull_request: push: branches: - - main - release-* env: @@ -152,7 +151,56 @@ jobs: - name: Install Linux dependencies uses: ./.github/actions/install-linux-deps - name: Check Compile - run: cargo run -p ci -- compile-check-no-std + run: cargo check -p bevy --no-default-features --features default_no_std --target x86_64-unknown-none + check-compiles-no-std-portable-atomic: + runs-on: ubuntu-latest + timeout-minutes: 30 + needs: ci + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + crates/bevy_ecs_compile_fail_tests/target/ + crates/bevy_reflect_compile_fail_tests/target/ + key: ${{ runner.os }}-cargo-check-compiles-no-std-portable-atomic-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable + with: + targets: thumbv6m-none-eabi + - name: Install Linux dependencies + uses: ./.github/actions/install-linux-deps + - name: Check Compile + run: cargo check -p bevy --no-default-features --features default_no_std --target thumbv6m-none-eabi + + check-compiles-no-std-examples: + runs-on: ubuntu-latest + timeout-minutes: 30 + needs: ci + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + crates/bevy_ecs_compile_fail_tests/target/ + 
crates/bevy_reflect_compile_fail_tests/target/ + key: ${{ runner.os }}-cargo-check-compiles-no-std-examples-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable + with: + targets: x86_64-unknown-none + - name: Install Linux dependencies + uses: ./.github/actions/install-linux-deps + - name: Check Compile + run: cd examples/no_std/library && cargo check --no-default-features --features libm,critical-section --target x86_64-unknown-none build-wasm: runs-on: ubuntu-latest @@ -211,7 +259,7 @@ jobs: # Full git history is needed to get a proper list of changed files within `super-linter` fetch-depth: 0 - name: Run Markdown Lint - uses: docker://ghcr.io/github/super-linter:slim-v4 + uses: super-linter/super-linter/slim@v7.3.0 env: MULTI_STATUS: false VALIDATE_ALL_CODEBASE: false @@ -244,7 +292,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Check for typos - uses: crate-ci/typos@v1.29.5 + uses: crate-ci/typos@v1.31.1 - name: Typos info if: failure() run: | @@ -254,49 +302,6 @@ jobs: echo 'if you use VSCode, you can also install `Typos Spell Checker' echo 'You can find the extension here: https://marketplace.visualstudio.com/items?itemName=tekumara.typos-vscode' - run-examples-macos-metal: - runs-on: macos-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - name: Disable audio - # Disable audio through a patch. on github m1 runners, audio timeouts after 15 minutes - run: git apply --ignore-whitespace tools/example-showcase/disable-audio.patch - - name: Run examples - run: | - for example in .github/example-run/*.ron; do - example_name=`basename $example .ron` - echo -n $example_name > last_example_run - echo "running $example_name - "`date` - time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" - sleep 10 - if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then - mkdir screenshots-$example_name - mv screenshot-*.png screenshots-$example_name/ - fi - done - mkdir traces && mv trace*.json traces/ - mkdir screenshots && mv screenshots-* screenshots/ - - name: save traces - uses: actions/upload-artifact@v4 - with: - name: example-traces-macos - path: traces - - name: Save PR number - if: ${{ github.event_name == 'pull_request' }} - run: | - echo ${{ github.event.number }} > ./screenshots/PR - - name: save screenshots - uses: actions/upload-artifact@v4 - with: - name: screenshots-macos - path: screenshots - - uses: actions/upload-artifact@v4 - if: ${{ failure() && github.event_name == 'pull_request' }} - with: - name: example-run-macos - path: example-run/ check-doc: runs-on: ubuntu-latest timeout-minutes: 30 @@ -335,6 +340,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable - name: check for missing metadata id: missing-metadata run: cargo run -p build-templated-pages -- check-missing examples @@ -369,6 +375,7 @@ jobs: needs: check-missing-examples-in-docs steps: - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable - name: check for missing features id: missing-features run: cargo run -p build-templated-pages -- check-missing features @@ -412,6 +419,7 @@ jobs: ~/.cargo/git/db/ target/ key: ${{ runner.os }}-cargo-msrv-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable - name: get MSRV id: msrv run: | @@ -445,7 +453,7 @@ jobs: shell: bash run: | errors="" - for file in $(find examples tests -name '*.rs'); do + 
for file in $(find examples tests -name '*.rs' -not -path 'examples/mobile/*'); do if grep -q "use bevy_" "$file"; then errors+="ERROR: Detected internal Bevy import in $file\n" fi diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 17ac22019e..8a04fadc94 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -59,7 +59,7 @@ jobs: env: # needs to be in sync with [package.metadata.docs.rs] RUSTFLAGS: --cfg docsrs_dep - RUSTDOCFLAGS: -Zunstable-options --cfg=docsrs --generate-link-to-definition + RUSTDOCFLAGS: -Zunstable-options --cfg=docsrs --generate-link-to-definition --html-after-content docs-rs/trait-tags.html run: | cargo doc \ -Zunstable-options \ diff --git a/.github/workflows/example-run-report.yml b/.github/workflows/example-run-report.yml new file mode 100644 index 0000000000..198dee72e4 --- /dev/null +++ b/.github/workflows/example-run-report.yml @@ -0,0 +1,120 @@ +name: Example Run - PR Comments + +# This workflow has write permissions on the repo +# It must not checkout a PR and run untrusted code + +# Also requesting write permissions on PR to be able to comment +permissions: + pull-requests: "write" + +on: + workflow_run: + workflows: ["Example Run"] + types: + - completed + +jobs: + make-macos-screenshots-available: + if: github.event.workflow_run.event == 'pull_request' + runs-on: ubuntu-latest + timeout-minutes: 30 + outputs: + branch-name: ${{ steps.branch-name.outputs.result }} + pr-number: ${{ steps.pr-number.outputs.result }} + steps: + - name: "Download artifact" + id: find-artifact + uses: actions/github-script@v7 + with: + result-encoding: string + script: | + var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: ${{github.event.workflow_run.id }}, + }); + var matchArtifacts = artifacts.data.artifacts.filter((artifact) => { + return artifact.name == "screenshots-macos" + }); + if (matchArtifacts.length == 0) { return "false" } + var matchArtifact = matchArtifacts[0]; + var download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: matchArtifact.id, + archive_format: 'zip', + }); + var fs = require('fs'); + fs.writeFileSync('${{github.workspace}}/screenshots-macos.zip', Buffer.from(download.data)); + return "true" + - name: prepare artifact folder + run: | + unzip screenshots-macos.zip + mkdir screenshots + mv screenshots-* screenshots/ + - name: save screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-macos + path: screenshots + - name: branch name + id: branch-name + run: | + echo "result=PR-$(cat PR)-${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT + - name: PR number + id: pr-number + run: | + echo "result=$(cat PR)" >> $GITHUB_OUTPUT + + compare-macos-screenshots: + name: Compare macOS screenshots + needs: [make-macos-screenshots-available] + uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.event.workflow_run.head_sha }} + branch: ${{ needs.make-macos-screenshots-available.outputs.branch-name }} + artifact: screenshots-macos + os: macos + secrets: inherit + + comment-on-pr: + name: Comment on PR + runs-on: ubuntu-latest + needs: [make-macos-screenshots-available, compare-macos-screenshots] + if: ${{ always() && needs.compare-macos-screenshots.result == 'failure' }} + steps: + - uses: actions/checkout@v4 + - name: "Check if PR already has label" + id: check-label + env: + PR: ${{ 
needs.make-macos-screenshots-available.outputs.pr-number }}
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          if [[ `gh api --jq '.labels.[].name' /repos/bevyengine/bevy/pulls/$PR` =~ "M-Deliberate-Rendering-Change" ]]
+          then
+            echo "result=true" >> $GITHUB_OUTPUT
+          else
+            echo "result=false" >> $GITHUB_OUTPUT
+          fi
+      - name: "Check if last comment is already from actions"
+        id: check-last-comment
+        env:
+          PR: ${{ needs.make-macos-screenshots-available.outputs.pr-number }}
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]]
+          then
+            echo "result=true" >> $GITHUB_OUTPUT
+          else
+            echo "result=false" >> $GITHUB_OUTPUT
+          fi
+      - name: "Comment on PR"
+        if: ${{ steps.check-label.outputs.result == 'false' && steps.check-last-comment.outputs.result == 'false' }}
+        env:
+          PROJECT: B04F67C0-C054-4A6F-92EC-F599FEC2FD1D
+          PR: ${{ needs.make-macos-screenshots-available.outputs.pr-number }}
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          LF=$'\n'
+          COMMENT_BODY="Your PR caused a change in the graphical output of an example or rendering test. This might be intentional, but it could also mean that something broke! ${LF}You can review it at https://pixel-eagle.com/project/$PROJECT?filter=PR-$PR ${LF} ${LF}If it's expected, please add the M-Deliberate-Rendering-Change label. ${LF} ${LF}If this change seems unrelated to your PR, you can consider updating your PR to target the latest main branch, either by rebasing or merging main into it."
+          gh issue comment $PR --body "$COMMENT_BODY"
diff --git a/.github/workflows/example-run.yml b/.github/workflows/example-run.yml
new file mode 100644
index 0000000000..676f676db5
--- /dev/null
+++ b/.github/workflows/example-run.yml
@@ -0,0 +1,187 @@
+name: Example Run
+
+on:
+  merge_group:
+  pull_request:
+  # also run when pushed to main to update reference screenshots
+  push:
+    branches:
+      - main
+
+env:
+  CARGO_TERM_COLOR: always
+  CARGO_INCREMENTAL: 0
+  CARGO_PROFILE_TEST_DEBUG: 0
+  CARGO_PROFILE_DEV_DEBUG: 0
+
+jobs:
+  run-examples-macos-metal:
+    runs-on: macos-latest
+    timeout-minutes: 30
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@stable
+      - name: Disable audio
+        # Disable audio through a patch. 
on github m1 runners, audio timeouts after 15 minutes + run: git apply --ignore-whitespace tools/example-showcase/disable-audio.patch + - name: Run examples + run: | + for example in .github/example-run/*.ron; do + example_name=`basename $example .ron` + echo -n $example_name > last_example_run + echo "running $example_name - "`date` + time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" + sleep 10 + if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then + mkdir screenshots-$example_name + mv screenshot-*.png screenshots-$example_name/ + fi + done + mkdir traces && mv trace*.json traces/ + mkdir screenshots && mv screenshots-* screenshots/ + - name: save traces + uses: actions/upload-artifact@v4 + with: + name: example-traces-macos + path: traces + - name: Save PR number + if: ${{ github.event_name == 'pull_request' }} + run: | + echo ${{ github.event.number }} > ./screenshots/PR + - name: save screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-macos + path: screenshots + - uses: actions/upload-artifact@v4 + if: ${{ failure() && github.event_name == 'pull_request' }} + with: + name: example-run-macos + path: example-run/ + + compare-macos-screenshots: + if: ${{ github.event_name != 'pull_request' }} + name: Compare Macos screenshots + needs: [run-examples-macos-metal] + uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.sha }} + branch: ${{ github.ref_name }} + artifact: screenshots-macos + os: macos + secrets: inherit + + run-examples-linux-vulkan: + if: ${{ github.event_name != 'pull_request' }} + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - name: Install Linux dependencies + uses: ./.github/actions/install-linux-deps + # At some point this may be merged into `install-linux-deps`, but for now it is its own step. 
+ - name: Install additional Linux dependencies for Vulkan + run: | + sudo add-apt-repository ppa:kisak/turtle -y + sudo apt-get install --no-install-recommends libxkbcommon-x11-0 xvfb libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-run-examples-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable + - name: Run examples + run: | + for example in .github/example-run/*.ron; do + example_name=`basename $example .ron` + echo -n $example_name > last_example_run + echo "running $example_name - "`date` + time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example xvfb-run cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" + sleep 10 + if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then + mkdir screenshots-$example_name + mv screenshot-*.png screenshots-$example_name/ + fi + done + mkdir traces && mv trace*.json traces/ + mkdir screenshots && mv screenshots-* screenshots/ + - name: save traces + uses: actions/upload-artifact@v4 + with: + name: example-traces-linux + path: traces + - name: save screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-linux + path: screenshots + - uses: actions/upload-artifact@v4 + if: ${{ failure() && github.event_name == 'pull_request' }} + with: + name: example-run-linux + path: example-run/ + + compare-linux-screenshots: + name: Compare Linux screenshots + needs: [run-examples-linux-vulkan] + uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.sha }} + branch: ${{ github.ref_name }} + artifact: screenshots-linux + os: linux + secrets: inherit + + run-examples-on-windows-dx12: + if: ${{ github.event_name != 'pull_request' }} + runs-on: windows-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - name: Run examples + shell: bash + run: | + for example in .github/example-run/*.ron; do + example_name=`basename $example .ron` + echo -n $example_name > last_example_run + echo "running $example_name - "`date` + time WGPU_BACKEND=dx12 TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "statically-linked-dxc,bevy_ci_testing,trace,trace_chrome" + sleep 10 + if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then + mkdir screenshots-$example_name + mv screenshot-*.png screenshots-$example_name/ + fi + done + mkdir traces && mv trace*.json traces/ + mkdir screenshots && mv screenshots-* screenshots/ + - name: save traces + uses: actions/upload-artifact@v4 + with: + name: example-traces-windows + path: traces + - name: save screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-windows + path: screenshots + - uses: actions/upload-artifact@v4 + if: ${{ failure() && github.event_name == 'pull_request' }} + with: + name: example-run-windows + path: example-run/ + + compare-windows-screenshots: + name: Compare Windows screenshots + needs: [run-examples-on-windows-dx12] + uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.sha }} + branch: ${{ github.ref_name }} + artifact: screenshots-windows + os: windows + secrets: inherit diff --git a/.github/workflows/send-screenshots-to-pixeleagle.yml b/.github/workflows/send-screenshots-to-pixeleagle.yml index b43a316f25..ee2b5e3dd1 
100644 --- a/.github/workflows/send-screenshots-to-pixeleagle.yml +++ b/.github/workflows/send-screenshots-to-pixeleagle.yml @@ -48,7 +48,7 @@ jobs: run: | # Create a new run with its associated metadata metadata='{"os":"${{ inputs.os }}", "commit": "${{ inputs.commit }}", "branch": "${{ inputs.branch }}"}' - run=`curl https://pixel-eagle.vleue.com/$project/runs --json "$metadata" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.id'` + run=`curl https://pixel-eagle.com/$project/runs --json "$metadata" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.id'` SAVEIFS=$IFS @@ -71,11 +71,11 @@ jobs: IFS=$SAVEIFS # Upload screenshots with unknown hashes - curl https://pixel-eagle.vleue.com/$project/runs/$run/hashes --json "$hashes" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.[]|[.name] | @tsv' | + curl https://pixel-eagle.com/$project/runs/$run/hashes --json "$hashes" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.[]|[.name] | @tsv' | while IFS=$'\t' read -r name; do name=`echo $name | tr -d '"'` echo "Uploading $name" - curl https://pixel-eagle.vleue.com/$project/runs/$run/screenshots -F "data=@./screenshots-$name" -F "screenshot=$name" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} + curl https://pixel-eagle.com/$project/runs/$run/screenshots -F "data=@./screenshots-$name" -F "screenshot=$name" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} echo done @@ -84,7 +84,7 @@ jobs: cd .. # Trigger comparison with the main branch on the same os - curl https://pixel-eagle.vleue.com/$project/runs/$run/compare/auto --json '{"os":"", "branch": "main"}' --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} > pixeleagle.json + curl https://pixel-eagle.com/$project/runs/$run/compare/auto --json '{"os":"", "branch": "main"}' --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} > pixeleagle.json # Log results compared_with=`cat pixeleagle.json | jq '.to'` diff --git a/.github/workflows/validation-jobs.yml b/.github/workflows/validation-jobs.yml index 6f4291b0aa..3667940857 100644 --- a/.github/workflows/validation-jobs.yml +++ b/.github/workflows/validation-jobs.yml @@ -5,7 +5,6 @@ on: pull_request: push: branches: - - main - release-* concurrency: @@ -80,112 +79,6 @@ jobs: - name: Build app for Android run: cd examples/mobile/android_example && chmod +x gradlew && ./gradlew build - run-examples-linux-vulkan: - # also run when pushed to main to update reference screenshots - if: ${{ github.event_name != 'pull_request' }} - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - name: Install Linux dependencies - uses: ./.github/actions/install-linux-deps - # At some point this may be merged into `install-linux-deps`, but for now it is its own step. 
- - name: Install additional Linux dependencies for Vulkan - run: | - sudo add-apt-repository ppa:kisak/turtle -y - sudo apt-get install --no-install-recommends libxkbcommon-x11-0 xvfb libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers - - uses: actions/cache@v4 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-run-examples-${{ hashFiles('**/Cargo.toml') }} - - uses: dtolnay/rust-toolchain@stable - - name: Run examples - run: | - for example in .github/example-run/*.ron; do - example_name=`basename $example .ron` - echo -n $example_name > last_example_run - echo "running $example_name - "`date` - time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example xvfb-run cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" - sleep 10 - if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then - mkdir screenshots-$example_name - mv screenshot-*.png screenshots-$example_name/ - fi - done - mkdir traces && mv trace*.json traces/ - mkdir screenshots && mv screenshots-* screenshots/ - - name: save traces - uses: actions/upload-artifact@v4 - with: - name: example-traces-linux - path: traces - - name: save screenshots - uses: actions/upload-artifact@v4 - with: - name: screenshots-linux - path: screenshots - - uses: actions/upload-artifact@v4 - if: ${{ failure() && github.event_name == 'pull_request' }} - with: - name: example-run-linux - path: example-run/ - - compare-linux-screenshots: - name: Compare Linux screenshots - needs: [run-examples-linux-vulkan] - uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml - with: - commit: ${{ github.sha }} - branch: ${{ github.ref_name }} - artifact: screenshots-linux - os: linux - secrets: inherit - - run-examples-on-windows-dx12: - # also run when pushed to main to update reference screenshots - if: ${{ github.event_name != 'pull_request' }} - runs-on: windows-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - name: Run examples - shell: bash - run: | - for example in .github/example-run/*.ron; do - example_name=`basename $example .ron` - echo -n $example_name > last_example_run - echo "running $example_name - "`date` - time WGPU_BACKEND=dx12 TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" - sleep 10 - if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then - mkdir screenshots-$example_name - mv screenshot-*.png screenshots-$example_name/ - fi - done - mkdir traces && mv trace*.json traces/ - mkdir screenshots && mv screenshots-* screenshots/ - - name: save traces - uses: actions/upload-artifact@v4 - with: - name: example-traces-windows - path: traces - - name: save screenshots - uses: actions/upload-artifact@v4 - with: - name: screenshots-windows - path: screenshots - - uses: actions/upload-artifact@v4 - if: ${{ failure() && github.event_name == 'pull_request' }} - with: - name: example-run-windows - path: example-run/ - run-examples-on-wasm: if: ${{ github.event_name == 'merge_group' }} runs-on: ubuntu-latest @@ -208,13 +101,6 @@ jobs: target/ key: ${{ runner.os }}-wasm-run-examples-${{ hashFiles('**/Cargo.toml') }} - - name: install xvfb, llvmpipe and lavapipe - run: | - sudo apt-get update -y -qq - sudo add-apt-repository ppa:kisak/turtle -y - sudo apt-get update - sudo apt install -y xvfb libgl1-mesa-dri libxcb-xfixes0-dev 
mesa-vulkan-drivers - - name: Install wasm-bindgen run: cargo install --force wasm-bindgen-cli diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index 372e903e6d..b4ddffdb9d 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -12,6 +12,7 @@ env: CARGO_INCREMENTAL: 0 CARGO_PROFILE_TEST_DEBUG: 0 CARGO_PROFILE_DEV_DEBUG: 0 + ISSUE_TITLE: Main branch fails to compile on Rust beta. # The jobs listed here are intentionally skipped when running on forks, for a number of reasons: # @@ -82,6 +83,30 @@ jobs: # See tools/ci/src/main.rs for the commands this runs run: cargo run -p ci -- compile + close-any-open-issues: + runs-on: ubuntu-latest + needs: ['test', 'lint', 'check-compiles'] + permissions: + issues: write + steps: + - name: Close issues + run: | + previous_issue_number=$(gh issue list \ + --search "$ISSUE_TITLE in:title" \ + --json number \ + --jq '.[0].number') + if [[ -n $previous_issue_number ]]; then + gh issue close $previous_issue_number \ + -r completed \ + -c $COMMENT + fi + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_REPO: ${{ github.repository }} + COMMENT: | + [Last pipeline run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) successfully completed. Closing issue. + + open-issue: name: Warn that weekly CI fails runs-on: ubuntu-latest @@ -95,7 +120,7 @@ jobs: - name: Create issue run: | previous_issue_number=$(gh issue list \ - --search "$TITLE in:title" \ + --search "$ISSUE_TITLE in:title" \ --json number \ --jq '.[0].number') if [[ -n $previous_issue_number ]]; then @@ -103,14 +128,13 @@ jobs: --body "Weekly pipeline still fails: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" else gh issue create \ - --title "$TITLE" \ + --title "$ISSUE_TITLE" \ --label "$LABELS" \ --body "$BODY" fi env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} - TITLE: Main branch fails to compile on Rust beta. LABELS: C-Bug,S-Needs-Triage BODY: | ## Weekly CI run has failed. 
diff --git a/CREDITS.md b/CREDITS.md index cc8b15083a..c0375bd38e 100644 --- a/CREDITS.md +++ b/CREDITS.md @@ -20,8 +20,8 @@ * Cake from [Kenney's Food Kit](https://www.kenney.nl/assets/food-kit) (CC0 1.0 Universal) * Ground tile from [Kenney's Tower Defense Kit](https://www.kenney.nl/assets/tower-defense-kit) (CC0 1.0 Universal) * Game icons from [Kenney's Game Icons](https://www.kenney.nl/assets/game-icons) (CC0 1.0 Universal) -* Space ships from [Kenny's Simple Space Kit](https://www.kenney.nl/assets/simple-space) (CC0 1.0 Universal) -* UI borders from [Kenny's Fantasy UI Borders Kit](https://kenney.nl/assets/fantasy-ui-borders) (CC0 1.0 Universal) +* Space ships from [Kenney's Simple Space Kit](https://www.kenney.nl/assets/simple-space) (CC0 1.0 Universal) +* UI borders from [Kenney's Fantasy UI Borders Kit](https://kenney.nl/assets/fantasy-ui-borders) (CC0 1.0 Universal) * glTF animated fox from [glTF Sample Models][fox] * Low poly fox [by PixelMannen] (CC0 1.0 Universal) * Rigging and animation [by @tomkranis on Sketchfab] ([CC-BY 4.0]) @@ -32,7 +32,7 @@ * Epic orchestra music sample, modified to loop, from [Migfus20](https://freesound.org/people/Migfus20/sounds/560449/) ([CC BY 4.0 DEED](https://creativecommons.org/licenses/by/4.0/)) [MorphStressTest]: https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/MorphStressTest -[fox]: https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/Fox +[fox]: https://github.com/KhronosGroup/glTF-Sample-Assets/tree/main/Models/Fox [by PixelMannen]: https://opengameart.org/content/fox-and-shiba [by @tomkranis on Sketchfab]: https://sketchfab.com/models/371dea88d7e04a76af5763f2a36866bc [CC-BY 4.0]: https://creativecommons.org/licenses/by/4.0/ diff --git a/Cargo.toml b/Cargo.toml index 84f00020f3..93d2e040e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy" version = "0.16.0-dev" -edition = "2021" +edition = "2024" categories = ["game-engines", "graphics", "gui", "rendering"] description = "A refreshingly simple data-driven game engine and app framework" exclude = ["assets/", "tools/", ".github/", "crates/", "examples/wasm/assets/"] @@ -10,7 +10,7 @@ keywords = ["game", "engine", "gamedev", "graphics", "bevy"] license = "MIT OR Apache-2.0" repository = "https://github.com/bevyengine/bevy" documentation = "https://docs.rs/bevy" -rust-version = "1.83.0" +rust-version = "1.85.0" [workspace] resolver = "2" @@ -19,9 +19,15 @@ members = [ "crates/*", # Several crates with macros have "compile fail" tests nested inside them, also known as UI # tests, that verify diagnostic output does not accidentally change. - "crates/*/compile_fail", + # TODO: Use a glob pattern once they are fixed in `dependabot-core` + # TODO: See https://github.com/bevyengine/bevy/issues/17876 for context. + "crates/bevy_derive/compile_fail", + "crates/bevy_ecs/compile_fail", + "crates/bevy_reflect/compile_fail", # Examples of compiling Bevy for mobile platforms. "examples/mobile", + # Examples of using Bevy on no_std platforms. + "examples/no_std/*", # Benchmarks "benches", # Internal tools that are not published. 
@@ -46,6 +52,9 @@ undocumented_unsafe_blocks = "warn" unwrap_or_default = "warn" needless_lifetimes = "allow" too_many_arguments = "allow" +nonstandard_macro_braces = "warn" +print_stdout = "warn" +print_stderr = "warn" ptr_as_ptr = "warn" ptr_cast_constness = "warn" @@ -91,6 +100,7 @@ undocumented_unsafe_blocks = "warn" unwrap_or_default = "warn" needless_lifetimes = "allow" too_many_arguments = "allow" +nonstandard_macro_braces = "warn" ptr_as_ptr = "warn" ptr_cast_constness = "warn" @@ -114,6 +124,8 @@ unused_qualifications = "warn" [features] default = [ + "std", + "async_executor", "android-game-activity", "android_shared_stdcxx", "animation", @@ -121,9 +133,12 @@ default = [ "bevy_audio", "bevy_color", "bevy_core_pipeline", + "bevy_anti_aliasing", "bevy_gilrs", "bevy_gizmos", "bevy_gltf", + "bevy_input_focus", + "bevy_log", "bevy_mesh_picking_backend", "bevy_pbr", "bevy_picking", @@ -150,6 +165,9 @@ default = [ "x11", ] +# Recommended defaults for no_std applications +default_no_std = ["libm", "critical-section", "bevy_color", "bevy_state"] + # Provides an implementation for picking meshes bevy_mesh_picking_backend = [ "bevy_picking", @@ -196,6 +214,13 @@ bevy_core_pipeline = [ "bevy_render", ] +# Provides various anti aliasing solutions +bevy_anti_aliasing = [ + "bevy_internal/bevy_anti_aliasing", + "bevy_asset", + "bevy_render", +] + # Adds gamepad support bevy_gilrs = ["bevy_internal/bevy_gilrs"] @@ -208,6 +233,7 @@ bevy_pbr = [ "bevy_asset", "bevy_render", "bevy_core_pipeline", + "bevy_anti_aliasing", ] # Provides picking functionality @@ -225,6 +251,7 @@ bevy_sprite = [ "bevy_render", "bevy_core_pipeline", "bevy_color", + "bevy_anti_aliasing", ] # Provides text functionality @@ -237,6 +264,7 @@ bevy_ui = [ "bevy_text", "bevy_sprite", "bevy_color", + "bevy_anti_aliasing", ] # Windowing layer @@ -257,6 +285,15 @@ bevy_dev_tools = ["bevy_internal/bevy_dev_tools"] # Enable the Bevy Remote Protocol bevy_remote = ["bevy_internal/bevy_remote"] +# Enable integration with `tracing` and `log` +bevy_log = ["bevy_internal/bevy_log"] + +# Enable input focus subsystem +bevy_input_focus = ["bevy_internal/bevy_input_focus"] + +# Use the configurable global error handler as the default error handler. 
+configurable_error_handler = ["bevy_internal/configurable_error_handler"] + # Enable passthrough loading for SPIR-V shaders (Only supported on Vulkan, shader capabilities and extensions must agree with the platform implementation) spirv_shader_passthrough = ["bevy_internal/spirv_shader_passthrough"] @@ -277,7 +314,7 @@ trace_tracy_memory = [ ] # Tracing support -trace = ["bevy_internal/trace"] +trace = ["bevy_internal/trace", "dep:tracing"] # Basis Universal compressed texture support basis-universal = ["bevy_internal/basis-universal"] @@ -423,6 +460,9 @@ shader_format_glsl = ["bevy_internal/shader_format_glsl"] # Enable support for shaders in SPIR-V shader_format_spirv = ["bevy_internal/shader_format_spirv"] +# Enable support for shaders in WESL +shader_format_wesl = ["bevy_internal/shader_format_wesl"] + # Enable support for transmission-related textures in the `StandardMaterial`, at the risk of blowing past the global, per-shader texture limit on older/lower-end GPUs pbr_transmission_textures = ["bevy_internal/pbr_transmission_textures"] @@ -470,9 +510,6 @@ meshlet = ["bevy_internal/meshlet"] # Enables processing meshes into meshlet meshes for bevy_pbr meshlet_processor = ["bevy_internal/meshlet_processor"] -# Enable support for the ios_simulator by downgrading some rendering capabilities -ios_simulator = ["bevy_internal/ios_simulator"] - # Enable built in global state machines bevy_state = ["bevy_internal/bevy_state"] @@ -482,14 +519,33 @@ track_location = ["bevy_internal/track_location"] # Enable function reflection reflect_functions = ["bevy_internal/reflect_functions"] +# Enable documentation reflection +reflect_documentation = ["bevy_internal/reflect_documentation"] + # Enable winit custom cursor support custom_cursor = ["bevy_internal/custom_cursor"] # Experimental support for nodes that are ignored for UI layouting ghost_nodes = ["bevy_internal/ghost_nodes"] +# Uses `async-executor` as a task execution backend. +async_executor = ["std", "bevy_internal/async_executor"] + +# Allows access to the `std` crate. +std = ["bevy_internal/std"] + +# `critical-section` provides the building blocks for synchronization primitives on all platforms, including `no_std`. +critical-section = ["bevy_internal/critical-section"] + +# Uses the `libm` maths library instead of the one provided in `std` and `core`. +libm = ["bevy_internal/libm"] + +# Enables use of browser APIs. Note this is currently only applicable on `wasm32` architectures. +web = ["bevy_internal/web"] + [dependencies] bevy_internal = { path = "crates/bevy_internal", version = "0.16.0-dev", default-features = false } +tracing = { version = "0.1", default-features = false, optional = true } # Wasm does not support dynamic linking. [target.'cfg(not(target_family = "wasm"))'.dependencies] @@ -522,7 +578,8 @@ hyper = { version = "1", features = ["server", "http1"] } http-body-util = "0.1" anyhow = "1" macro_rules_attribute = "0.2" -accesskit = "0.17" +accesskit = "0.18" +nonmax = "0.5" [target.'cfg(not(target_family = "wasm"))'.dev-dependencies] smol = "2" @@ -572,7 +629,7 @@ doc-scrape-examples = true [package.metadata.example.2d_viewport_to_world] name = "2D Viewport To World" -description = "Demonstrates how to use the `Camera::viewport_to_world_2d` method" +description = "Demonstrates how to use the `Camera::viewport_to_world_2d` method with a dynamic viewport and camera." 
category = "2D Rendering" wasm = true @@ -785,6 +842,17 @@ description = "Used to test alpha modes with mesh2d" category = "2D Rendering" wasm = true +[[example]] +name = "mesh2d_repeated_texture" +path = "examples/2d/mesh2d_repeated_texture.rs" +doc-scrape-examples = true + +[package.metadata.example.mesh2d_repeated_texture] +name = "Mesh2d Repeated Texture" +description = "Showcase of using `uv_transform` on the `ColorMaterial` of a `Mesh2d`" +category = "2D Rendering" +wasm = true + [[example]] name = "pixel_grid_snap" path = "examples/2d/pixel_grid_snap.rs" @@ -1530,6 +1598,7 @@ wasm = true name = "headless" path = "examples/app/headless.rs" doc-scrape-examples = true +required-features = ["bevy_log"] [package.metadata.example.headless] name = "Headless" @@ -1803,7 +1872,7 @@ path = "examples/asset/multi_asset_sync.rs" doc-scrape-examples = true [package.metadata.example.multi_asset_sync] -name = "Mult-asset synchronization" +name = "Multi-asset synchronization" description = "Demonstrates how to wait for multiple assets to be loaded." category = "Assets" wasm = true @@ -2164,6 +2233,7 @@ wasm = false name = "fallible_params" path = "examples/ecs/fallible_params.rs" doc-scrape-examples = true +required-features = ["configurable_error_handler"] [package.metadata.example.fallible_params] name = "Fallible System Parameters" @@ -2172,13 +2242,14 @@ category = "ECS (Entity Component System)" wasm = false [[example]] -name = "fallible_systems" -path = "examples/ecs/fallible_systems.rs" +name = "error_handling" +path = "examples/ecs/error_handling.rs" doc-scrape-examples = true +required-features = ["bevy_mesh_picking_backend", "configurable_error_handler"] -[package.metadata.example.fallible_systems] -name = "Fallible Systems" -description = "Systems that return results to handle errors" +[package.metadata.example.error_handling] +name = "Error handling" +description = "How to return and handle errors across the ECS" category = "ECS (Entity Component System)" wasm = false @@ -2734,6 +2805,18 @@ description = "A shader that uses the GLSL shading language" category = "Shaders" wasm = true +[[example]] +name = "shader_material_wesl" +path = "examples/shader/shader_material_wesl.rs" +doc-scrape-examples = true +required-features = ["shader_format_wesl"] + +[package.metadata.example.shader_material_wesl] +name = "Material - WESL" +description = "A shader that uses WESL" +category = "Shaders" +wasm = true + [[example]] name = "custom_shader_instancing" path = "examples/shader/custom_shader_instancing.rs" @@ -2745,6 +2828,18 @@ description = "A shader that renders a mesh multiple times in one draw call usin category = "Shaders" wasm = true +[[example]] +name = "custom_render_phase" +path = "examples/shader/custom_render_phase.rs" +doc-scrape-examples = true + +[package.metadata.example.custom_render_phase] +name = "Custom Render Phase" +description = "Shows how to make a complete render phase" +category = "Shaders" +wasm = true + + [[example]] name = "automatic_instancing" path = "examples/shader/automatic_instancing.rs" @@ -4006,7 +4101,6 @@ name = "Sprite Picking" description = "Demonstrates picking sprites and sprite atlases" category = "Picking" wasm = true -required-features = ["bevy_sprite_picking_backend"] [[example]] name = "debug_picking" @@ -4106,7 +4200,15 @@ panic = "abort" # for details on why this is needed. Since dependencies don't expect to be built # with `--cfg docsrs` (and thus fail to compile) we use a different cfg. 
rustc-args = ["--cfg", "docsrs_dep"] -rustdoc-args = ["-Zunstable-options", "--generate-link-to-definition"] +rustdoc-args = [ + "-Zunstable-options", + "--generate-link-to-definition", + # Embed tags to the top of documentation pages for common Bevy traits + # that are implemented by the current type, like `Component` or `Resource`. + # This makes it easier to see at a glance what types are used for. + "--html-after-content", + "docs-rs/trait-tags.html", +] all-features = true cargo-args = ["-Zunstable-options", "-Zrustdoc-scrape-examples"] @@ -4147,11 +4249,11 @@ doc-scrape-examples = true hidden = true [[example]] -name = "testbed_ui_layout_rounding" -path = "examples/testbed/ui_layout_rounding.rs" +name = "testbed_full_ui" +path = "examples/testbed/full_ui.rs" doc-scrape-examples = true -[package.metadata.example.testbed_ui_layout_rounding] +[package.metadata.example.testbed_full_ui] hidden = true [[example]] @@ -4197,3 +4299,50 @@ name = "Occlusion Culling" description = "Demonstration of Occlusion Culling" category = "3D Rendering" wasm = false + +[[example]] +name = "camera_controller" +path = "examples/helpers/camera_controller.rs" +doc-scrape-examples = true +crate-type = ["lib"] + +[package.metadata.example.camera_controller] +name = "Camera Controller" +description = "Example Free-Cam Styled Camera Controller" +category = "Helpers" +wasm = true + +[[example]] +name = "widgets" +path = "examples/helpers/widgets.rs" +doc-scrape-examples = true +crate-type = ["lib"] + +[package.metadata.example.widgets] +name = "Widgets" +description = "Example UI Widgets" +category = "Helpers" +wasm = true + +[[example]] +name = "no_std_library" +path = "examples/no_std/library/src/lib.rs" +doc-scrape-examples = true +crate-type = ["lib"] + +[package.metadata.example.no_std_library] +name = "`no_std` Compatible Library" +description = "Example library compatible with `std` and `no_std` targets" +category = "Embedded" +wasm = true + +[[example]] +name = "extended_material_bindless" +path = "examples/shader/extended_material_bindless.rs" +doc-scrape-examples = true + +[package.metadata.example.extended_material_bindless] +name = "Extended Bindless Material" +description = "Demonstrates bindless `ExtendedMaterial`" +category = "Shaders" +wasm = false diff --git a/assets/models/animated/Fox.glb b/assets/models/animated/Fox.glb index 2bb946e2d4..1ef5c0d056 100644 Binary files a/assets/models/animated/Fox.glb and b/assets/models/animated/Fox.glb differ diff --git a/assets/scenes/load_scene_example.scn.ron b/assets/scenes/load_scene_example.scn.ron index 813deb251e..e768a7b149 100644 --- a/assets/scenes/load_scene_example.scn.ron +++ b/assets/scenes/load_scene_example.scn.ron @@ -7,10 +7,7 @@ entities: { 4294967296: ( components: { - "bevy_ecs::name::Name": ( - hash: 17588334858059901562, - name: "joe", - ), + "bevy_ecs::name::Name": "joe", "bevy_transform::components::global_transform::GlobalTransform": ((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0)), "bevy_transform::components::transform::Transform": ( translation: (0.0, 0.0, 0.0), diff --git a/assets/shaders/automatic_instancing.wgsl b/assets/shaders/automatic_instancing.wgsl new file mode 100644 index 0000000000..35276246b0 --- /dev/null +++ b/assets/shaders/automatic_instancing.wgsl @@ -0,0 +1,43 @@ +#import bevy_pbr::{ + mesh_functions, + view_transformations::position_world_to_clip +} + +@group(2) @binding(0) var texture: texture_2d; +@group(2) @binding(1) var texture_sampler: sampler; + +struct Vertex { + @builtin(instance_index) 
instance_index: u32,
+    @location(0) position: vec3<f32>,
+};
+
+struct VertexOutput {
+    @builtin(position) clip_position: vec4<f32>,
+    @location(0) world_position: vec4<f32>,
+    @location(1) color: vec4<f32>,
+};
+
+@vertex
+fn vertex(vertex: Vertex) -> VertexOutput {
+    var out: VertexOutput;
+
+    // Lookup the tag for the given mesh
+    let tag = mesh_functions::get_tag(vertex.instance_index);
+    var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index);
+    out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0));
+    out.clip_position = position_world_to_clip(out.world_position.xyz);
+
+    let tex_dim = textureDimensions(texture);
+    // Find the texel coordinate as derived from the tag
+    let texel_coord = vec2(tag % tex_dim.x, tag / tex_dim.x);
+
+    out.color = textureLoad(texture, texel_coord, 0);
+    return out;
+}
+
+@fragment
+fn fragment(
+    mesh: VertexOutput,
+) -> @location(0) vec4<f32> {
+    return mesh.color;
+}
\ No newline at end of file
diff --git a/assets/shaders/bindless_material.wgsl b/assets/shaders/bindless_material.wgsl
index 9d9d068d4c..3de313b81a 100644
--- a/assets/shaders/bindless_material.wgsl
+++ b/assets/shaders/bindless_material.wgsl
@@ -1,14 +1,22 @@
 #import bevy_pbr::forward_io::VertexOutput
 #import bevy_pbr::mesh_bindings::mesh
+#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d}
 
 struct Color {
     base_color: vec4<f32>,
 }
 
+// This structure is a mapping from bindless index to the index in the
+// appropriate slab
+struct MaterialBindings {
+    material: u32,              // 0
+    color_texture: u32,         // 1
+    color_texture_sampler: u32, // 2
+}
+
 #ifdef BINDLESS
-@group(2) @binding(0) var<storage> material_color: binding_array<Color, 4>;
-@group(2) @binding(1) var material_color_texture: binding_array<texture_2d<f32>, 4>;
-@group(2) @binding(2) var material_color_sampler: binding_array<sampler, 4>;
+@group(2) @binding(0) var<storage> materials: array<MaterialBindings>;
+@group(2) @binding(10) var<storage> material_color: binding_array<Color>;
 #else // BINDLESS
 @group(2) @binding(0) var<uniform> material_color: Color;
 @group(2) @binding(1) var material_color_texture: texture_2d<f32>;
@@ -19,15 +27,15 @@ struct Color {
 fn fragment(in: VertexOutput) -> @location(0) vec4<f32> {
 #ifdef BINDLESS
     let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu;
-    let base_color = material_color[slot].base_color;
+    let base_color = material_color[materials[slot].material].base_color;
 #else // BINDLESS
     let base_color = material_color.base_color;
 #endif // BINDLESS
 
     return base_color * textureSampleLevel(
 #ifdef BINDLESS
-        material_color_texture[slot],
-        material_color_sampler[slot],
+        bindless_textures_2d[materials[slot].color_texture],
+        bindless_samplers_filtering[materials[slot].color_texture_sampler],
 #else // BINDLESS
         material_color_texture,
         material_color_sampler,
diff --git a/assets/shaders/custom_material.wesl b/assets/shaders/custom_material.wesl
new file mode 100644
index 0000000000..5113e1cbe0
--- /dev/null
+++ b/assets/shaders/custom_material.wesl
@@ -0,0 +1,20 @@
+import super::util::make_polka_dots;
+
+struct VertexOutput {
+    @builtin(position) position: vec4<f32>,
+    @location(2) uv: vec2<f32>,
+}
+
+struct CustomMaterial {
+    // Needed for 16-byte alignment on WebGL2
+    time: vec4<f32>,
+}
+
+@group(2) @binding(0) var<uniform> material: CustomMaterial;
+
+@fragment
+fn fragment(
+    mesh: VertexOutput,
+) -> @location(0) vec4<f32> {
+    return make_polka_dots(mesh.uv, material.time.x);
+}
diff --git a/assets/shaders/custom_stencil.wgsl b/assets/shaders/custom_stencil.wgsl
new file mode 100644
index 0000000000..6f2fa2da4f
--- /dev/null
+++ b/assets/shaders/custom_stencil.wgsl
@@ -0,0 +1,41 @@
+//! A shader showing how to use the vertex position data to output the
+//! stencil in the right position
+
+// First we import everything we need from bevy_pbr
+// A 2d shader would be very similar but import from bevy_sprite instead
+#import bevy_pbr::{
+    mesh_functions,
+    view_transformations::position_world_to_clip
+}
+
+struct Vertex {
+    // This is needed if you are using batching and/or gpu preprocessing
+    // It's a built in so you don't need to define it in the vertex layout
+    @builtin(instance_index) instance_index: u32,
+    // Like we defined for the vertex layout
+    // position is at location 0
+    @location(0) position: vec3<f32>,
+};
+
+// This is the output of the vertex shader and we also use it as the input for the fragment shader
+struct VertexOutput {
+    @builtin(position) clip_position: vec4<f32>,
+    @location(0) world_position: vec4<f32>,
+};
+
+@vertex
+fn vertex(vertex: Vertex) -> VertexOutput {
+    var out: VertexOutput;
+    // This is how bevy computes the world position
+    // The vertex.instance_index is very important. Especially if you are using batching and gpu preprocessing
+    var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index);
+    out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0));
+    out.clip_position = position_world_to_clip(out.world_position.xyz);
+    return out;
+}
+
+@fragment
+fn fragment(in: VertexOutput) -> @location(0) vec4<f32> {
+    // Output a red color to represent the stencil of the mesh
+    return vec4(1.0, 0.0, 0.0, 1.0);
+}
diff --git a/assets/shaders/custom_ui_material.wgsl b/assets/shaders/custom_ui_material.wgsl
index 815fceb5fc..528fa55302 100644
--- a/assets/shaders/custom_ui_material.wgsl
+++ b/assets/shaders/custom_ui_material.wgsl
@@ -2,7 +2,7 @@
 #import bevy_ui::ui_vertex_output::UiVertexOutput
 
 @group(1) @binding(0) var<uniform> color: vec4<f32>;
-@group(1) @binding(1) var<uniform> slider: f32;
+@group(1) @binding(1) var<uniform> slider: vec4<f32>;
 @group(1) @binding(2) var material_color_texture: texture_2d<f32>;
 @group(1) @binding(3) var material_color_sampler: sampler;
 @group(1) @binding(4) var<uniform> border_color: vec4<f32>;
@@ -50,7 +50,7 @@ fn fragment(in: UiVertexOutput) -> @location(0) vec4<f32> {
 
     // sample the texture at this position if it's to the left of the slider value
     // otherwise return a fully transparent color
-    if in.uv.x < slider {
+    if in.uv.x < slider.x {
         let output_color = textureSample(material_color_texture, material_color_sampler, in.uv) * color;
         return output_color;
     } else {
diff --git a/assets/shaders/extended_material_bindless.wgsl b/assets/shaders/extended_material_bindless.wgsl
new file mode 100644
index 0000000000..f8650b0da7
--- /dev/null
+++ b/assets/shaders/extended_material_bindless.wgsl
@@ -0,0 +1,107 @@
+// The shader that goes with `extended_material_bindless.rs`.
+//
+// This code demonstrates how to write shaders that are compatible with both
+// bindless and non-bindless mode. See the `#ifdef BINDLESS` blocks. 
+
+#import bevy_pbr::{
+    forward_io::{FragmentOutput, VertexOutput},
+    mesh_bindings::mesh,
+    pbr_fragment::pbr_input_from_standard_material,
+    pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing},
+}
+#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d}
+
+#ifdef BINDLESS
+#import bevy_pbr::pbr_bindings::{material_array, material_indices}
+#else // BINDLESS
+#import bevy_pbr::pbr_bindings::material
+#endif // BINDLESS
+
+// Stores the indices of the bindless resources in the bindless resource arrays,
+// for the `ExampleBindlessExtension` fields.
+struct ExampleBindlessExtendedMaterialIndices {
+    // The index of the `ExampleBindlessExtendedMaterial` data in
+    // `example_extended_material`.
+    material: u32,
+    // The index of the texture we're going to modulate the base color with in
+    // the `bindless_textures_2d` array.
+    modulate_texture: u32,
+    // The index of the sampler we're going to sample the modulated texture with
+    // in the `bindless_samplers_filtering` array.
+    modulate_texture_sampler: u32,
+}
+
+// Plain data associated with this example material.
+struct ExampleBindlessExtendedMaterial {
+    // The color that we multiply the base color, base color texture, and
+    // modulated texture with.
+    modulate_color: vec4<f32>,
+}
+
+#ifdef BINDLESS
+
+// The indices of the bindless resources in the bindless resource arrays, for
+// the `ExampleBindlessExtension` fields.
+@group(2) @binding(100) var<storage> example_extended_material_indices:
+    array<ExampleBindlessExtendedMaterialIndices>;
+// An array that holds the `ExampleBindlessExtendedMaterial` plain old data,
+// indexed by `ExampleBindlessExtendedMaterialIndices.material`.
+@group(2) @binding(101) var<storage> example_extended_material:
+    array<ExampleBindlessExtendedMaterial>;
+
+#else // BINDLESS
+
+// In non-bindless mode, we simply use a uniform for the plain old data.
+@group(2) @binding(50) var<uniform> example_extended_material: ExampleBindlessExtendedMaterial;
+@group(2) @binding(51) var modulate_texture: texture_2d<f32>;
+@group(2) @binding(52) var modulate_sampler: sampler;
+
+#endif // BINDLESS
+
+@fragment
+fn fragment(
+    in: VertexOutput,
+    @builtin(front_facing) is_front: bool,
+) -> FragmentOutput {
+#ifdef BINDLESS
+    // Fetch the material slot. We'll use this in turn to fetch the bindless
+    // indices from `example_extended_material_indices`.
+    let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu;
+#endif // BINDLESS
+
+    // Generate a `PbrInput` struct from the `StandardMaterial` bindings.
+    var pbr_input = pbr_input_from_standard_material(in, is_front);
+
+    // Calculate the UV for the texture we're about to sample.
+#ifdef BINDLESS
+    let uv_transform = material_array[material_indices[slot].material].uv_transform;
+#else // BINDLESS
+    let uv_transform = material.uv_transform;
+#endif // BINDLESS
+    let uv = (uv_transform * vec3(in.uv, 1.0)).xy;
+
+    // Multiply the base color by the `modulate_texture` and `modulate_color`.
+#ifdef BINDLESS
+    // Notice how we fetch the texture, sampler, and plain extended material
+    // data from the appropriate arrays. 
+ pbr_input.material.base_color *= textureSample( + bindless_textures_2d[example_extended_material_indices[slot].modulate_texture], + bindless_samplers_filtering[ + example_extended_material_indices[slot].modulate_texture_sampler + ], + uv + ) * example_extended_material[example_extended_material_indices[slot].material].modulate_color; +#else // BINDLESS + pbr_input.material.base_color *= textureSample(modulate_texture, modulate_sampler, uv) * + example_extended_material.modulate_color; +#endif // BINDLESS + + var out: FragmentOutput; + // Apply lighting. + out.color = apply_pbr_lighting(pbr_input); + // Apply in-shader post processing (fog, alpha-premultiply, and also + // tonemapping, debanding if the camera is non-HDR). Note this does not + // include fullscreen postprocessing effects like bloom. + out.color = main_pass_post_lighting_processing(pbr_input, out.color); + return out; +} diff --git a/assets/shaders/specialized_mesh_pipeline.wgsl b/assets/shaders/specialized_mesh_pipeline.wgsl index e307a7c48c..29c9069ec8 100644 --- a/assets/shaders/specialized_mesh_pipeline.wgsl +++ b/assets/shaders/specialized_mesh_pipeline.wgsl @@ -2,7 +2,7 @@ //! between the vertex and fragment shader. Also shows the custom vertex layout. // First we import everything we need from bevy_pbr -// A 2d shader would be vevry similar but import from bevy_sprite instead +// A 2D shader would be very similar but import from bevy_sprite instead #import bevy_pbr::{ mesh_functions, view_transformations::position_world_to_clip @@ -45,4 +45,4 @@ fn vertex(vertex: Vertex) -> VertexOutput { fn fragment(in: VertexOutput) -> @location(0) vec4 { // output the color directly return vec4(in.color, 1.0); -} \ No newline at end of file +} diff --git a/assets/shaders/storage_buffer.wgsl b/assets/shaders/storage_buffer.wgsl index 1859e8dde2..c27053b9a2 100644 --- a/assets/shaders/storage_buffer.wgsl +++ b/assets/shaders/storage_buffer.wgsl @@ -4,7 +4,6 @@ } @group(2) @binding(0) var colors: array, 5>; -@group(2) @binding(1) var color_id: u32; struct Vertex { @builtin(instance_index) instance_index: u32, @@ -20,11 +19,12 @@ struct VertexOutput { @vertex fn vertex(vertex: Vertex) -> VertexOutput { var out: VertexOutput; + let tag = mesh_functions::get_tag(vertex.instance_index); var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index); out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); out.clip_position = position_world_to_clip(out.world_position.xyz); - out.color = colors[color_id]; + out.color = colors[tag]; return out; } diff --git a/assets/shaders/tonemapping_test_patterns.wgsl b/assets/shaders/tonemapping_test_patterns.wgsl index 7fe88bf548..2237bdf6c5 100644 --- a/assets/shaders/tonemapping_test_patterns.wgsl +++ b/assets/shaders/tonemapping_test_patterns.wgsl @@ -9,19 +9,19 @@ #import bevy_core_pipeline::tonemapping::tone_mapping #endif -// Sweep across hues on y axis with value from 0.0 to +15EV across x axis +// Sweep across hues on y axis with value from 0.0 to +15EV across x axis // quantized into 24 steps for both axis. 
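Returning to the `storage_buffer.wgsl` change above, where the `color_id` uniform is replaced by `mesh_functions::get_tag(vertex.instance_index)`: a hedged sketch of the Rust side, assuming the per-instance value comes from a `MeshTag` component. The component path and the use of `StandardMaterial` here are assumptions; the actual example uses its own material with the `colors` storage buffer.

```rust
use bevy::prelude::*;
use bevy::render::mesh::MeshTag;

fn spawn_tagged_mesh(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
) {
    commands.spawn((
        Mesh3d(meshes.add(Cuboid::default())),
        MeshMaterial3d(materials.add(StandardMaterial::default())),
        // The shader reads this back with `mesh_functions::get_tag(vertex.instance_index)`
        // and uses it to index the `colors` storage buffer.
        MeshTag(3),
    ));
}
```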
fn color_sweep(uv_input: vec2) -> vec3 { var uv = uv_input; let steps = 24.0; uv.y = uv.y * (1.0 + 1.0 / steps); let ratio = 2.0; - + let h = PI * 2.0 * floor(1.0 + steps * uv.y) / steps; let L = floor(uv.x * steps * ratio) / (steps * ratio) - 0.5; - + var color = vec3(0.0); - if uv.y < 1.0 { + if uv.y < 1.0 { color = cos(h + vec3(0.0, 1.0, 2.0) * PI * 2.0 / 3.0); let maxRGB = max(color.r, max(color.g, color.b)); let minRGB = min(color.r, min(color.g, color.b)); diff --git a/assets/shaders/util.wesl b/assets/shaders/util.wesl new file mode 100644 index 0000000000..ebbf023926 --- /dev/null +++ b/assets/shaders/util.wesl @@ -0,0 +1,44 @@ +fn make_polka_dots(pos: vec2, time: f32) -> vec4 { + let scaled_pos = pos * 6.0; + let cell = vec2(fract(scaled_pos.x), fract(scaled_pos.y)); + var dist_from_center = distance(cell, vec2(0.5)); + + let is_even = (floor(scaled_pos.x) + floor(scaled_pos.y)) % 2.0; + + var dot_color = vec3(0.0); + var is_dot = 0.0; + + @if(!PARTY_MODE) { + let color1 = vec3(1.0, 0.4, 0.8); // pink + let color2 = vec3(0.6, 0.2, 1.0); // purple + dot_color = mix(color1, color2, is_even); + is_dot = step(dist_from_center, 0.3); + } @else { + let grid_x = floor(scaled_pos.x); + let grid_y = floor(scaled_pos.y); + let wave_speed = 3.0; + let wave_phase = time * wave_speed; + + let diagonal_pos = (grid_x + grid_y) * 0.5; + let wave_value = sin(diagonal_pos + wave_phase); + + let wave_normalized = (wave_value + 1.0) * 0.5; + + let color1 = vec3(1.0, 0.3, 0.7); + let color2 = vec3(0.5, 0.1, 1.0); + let intense_color1 = vec3(1.0, 0.1, 0.9); + let intense_color2 = vec3(0.8, 0.0, 1.0); + + let animated_color1 = mix(color1, intense_color1, wave_normalized); + let animated_color2 = mix(color2, intense_color2, wave_normalized); + + dot_color = mix(animated_color1, animated_color2, is_even); + + let size_mod = 0.15 * wave_value; + dist_from_center = dist_from_center * (1.0 - size_mod); + // Animate whether something is a dot by position but also time + is_dot = step(dist_from_center, 0.3 + wave_normalized * 0.2); + } + + return vec4(dot_color * is_dot, 1.0); +} diff --git a/benches/Cargo.toml b/benches/Cargo.toml index d4b032eab0..3f547d80d9 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "benches" -edition = "2021" +edition = "2024" description = "Benchmarks that test Bevy's performance" publish = false license = "MIT OR Apache-2.0" @@ -24,7 +24,7 @@ bevy_reflect = { path = "../crates/bevy_reflect", features = ["functions"] } bevy_render = { path = "../crates/bevy_render" } bevy_tasks = { path = "../crates/bevy_tasks" } bevy_utils = { path = "../crates/bevy_utils" } -bevy_platform_support = { path = "../crates/bevy_platform_support", default-features = false, features = [ +bevy_platform = { path = "../crates/bevy_platform", default-features = false, features = [ "std", ] } @@ -50,6 +50,7 @@ undocumented_unsafe_blocks = "warn" unwrap_or_default = "warn" needless_lifetimes = "allow" too_many_arguments = "allow" +nonstandard_macro_braces = "warn" ptr_as_ptr = "warn" ptr_cast_constness = "warn" diff --git a/benches/README.md b/benches/README.md index 2e91916e48..5f256f877b 100644 --- a/benches/README.md +++ b/benches/README.md @@ -25,10 +25,10 @@ cargo bench -p benches -- name_fragment cargo bench -p benches -- --list # Save a baseline to be compared against later. -cargo bench -p benches --save-baseline before +cargo bench -p benches -- --save-baseline before # Compare the current benchmarks against a baseline to find performance gains and regressions. 
-cargo bench -p benches --baseline before +cargo bench -p benches -- --baseline before ``` ## Criterion diff --git a/benches/benches/bevy_ecs/change_detection.rs b/benches/benches/bevy_ecs/change_detection.rs index 84a8d4c39e..92f3251abc 100644 --- a/benches/benches/bevy_ecs/change_detection.rs +++ b/benches/benches/bevy_ecs/change_detection.rs @@ -95,7 +95,7 @@ fn all_added_detection_generic(group: &mut BenchGroup, e let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); @@ -143,7 +143,7 @@ fn all_changed_detection_generic + Default + let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); @@ -196,7 +196,7 @@ fn few_changed_detection_generic + Default + let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { for entity in query.iter(world) { black_box(entity); } @@ -237,7 +237,7 @@ fn none_changed_detection_generic + Default>( let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); @@ -343,7 +343,7 @@ fn multiple_archetype_none_changed_detection_generic< let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); diff --git a/benches/benches/bevy_ecs/components/add_remove.rs b/benches/benches/bevy_ecs/components/add_remove.rs index b381ccb434..9b654e7a82 100644 --- a/benches/benches/bevy_ecs/components/add_remove.rs +++ b/benches/benches/bevy_ecs/components/add_remove.rs @@ -12,7 +12,7 @@ impl Benchmark { let mut world = World::default(); let entities = world - .spawn_batch(core::iter::repeat(A(0.)).take(10000)) + .spawn_batch(core::iter::repeat_n(A(0.), 10_000)) .collect(); Self(world, entities) } diff --git a/benches/benches/bevy_ecs/entity_cloning.rs b/benches/benches/bevy_ecs/entity_cloning.rs index 7c474cc4f8..0eaae27ce4 100644 --- a/benches/benches/bevy_ecs/entity_cloning.rs +++ b/benches/benches/bevy_ecs/entity_cloning.rs @@ -55,7 +55,7 @@ type ComplexBundle = (C1, C2, C3, C4, C5, C6, C7, C8, C9, C10); /// use the [`Reflect`] trait instead of [`Clone`]. fn reflection_cloner( world: &mut World, - recursive: bool, + linked_cloning: bool, ) -> EntityCloner { // Get mutable access to the type registry, creating it if it does not exist yet. 
let registry = world.get_resource_or_init::(); @@ -77,7 +77,7 @@ fn reflection_cloner( for component in component_ids { builder.override_clone_behavior_with_id(component, ComponentCloneBehavior::reflect()); } - builder.recursive(recursive); + builder.linked_cloning(linked_cloning); builder.finish() } @@ -136,7 +136,7 @@ fn bench_clone_hierarchy( reflection_cloner::(&mut world, true) } else { let mut builder = EntityCloner::build(&mut world); - builder.recursive(true); + builder.linked_cloning(true); builder.finish() }; @@ -153,9 +153,9 @@ fn bench_clone_hierarchy( hierarchy_level.clear(); - for parent_id in current_hierarchy_level { + for parent in current_hierarchy_level { for _ in 0..children { - let child_id = world.spawn((B::default(), ChildOf(parent_id))).id(); + let child_id = world.spawn((B::default(), ChildOf(parent))).id(); hierarchy_level.push(child_id); } } diff --git a/benches/benches/bevy_ecs/iteration/iter_simple.rs b/benches/benches/bevy_ecs/iteration/iter_simple.rs index 1fc86f5087..14cca69082 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple.rs @@ -19,15 +19,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs index f0a41d18be..19396e95b0 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs @@ -19,15 +19,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs index 0075c2706b..1e0db505c1 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs @@ -21,15 +21,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs index 7dbd11d1e0..505d624eb8 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs @@ -33,8 +33,8 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), 
Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -47,9 +47,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs index f520ffde42..88b58be0f2 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs @@ -35,8 +35,8 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -49,9 +49,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs index e4ba375941..ed1c531c1d 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs @@ -21,15 +21,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_system.rs b/benches/benches/bevy_ecs/iteration/iter_simple_system.rs index 18918ee234..2b6e828721 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_system.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_system.rs @@ -19,15 +19,15 @@ impl Benchmark { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); fn query_system(mut query: Query<(&Velocity, &mut Position)>) { for (velocity, mut position) in &mut query { diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs b/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs index 7d013b3bf6..dccd1fe8b3 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs @@ -33,8 +33,8 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -47,9 +47,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs index 28a6dbd85d..49677dc1b9 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs +++ 
b/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs @@ -35,8 +35,8 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -49,9 +49,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/par_iter_simple.rs b/benches/benches/bevy_ecs/iteration/par_iter_simple.rs index 971598005a..92259cb98f 100644 --- a/benches/benches/bevy_ecs/iteration/par_iter_simple.rs +++ b/benches/benches/bevy_ecs/iteration/par_iter_simple.rs @@ -30,15 +30,15 @@ impl<'w> Benchmark<'w> { let mut world = World::new(); - let iter = world.spawn_batch( - core::iter::repeat(( + let iter = world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(100_000), - ); + ), + 100_000, + )); let entities = iter.into_iter().collect::>(); for i in 0..fragment { let mut e = world.entity_mut(entities[i as usize]); diff --git a/benches/benches/bevy_ecs/observers/simple.rs b/benches/benches/bevy_ecs/observers/simple.rs index bf2dd236d6..85207624e8 100644 --- a/benches/benches/bevy_ecs/observers/simple.rs +++ b/benches/benches/bevy_ecs/observers/simple.rs @@ -1,6 +1,10 @@ use core::hint::black_box; -use bevy_ecs::{entity::Entity, event::Event, observer::Trigger, world::World}; +use bevy_ecs::{ + event::Event, + observer::{Trigger, TriggerTargets}, + world::World, +}; use criterion::Criterion; use rand::{prelude::SliceRandom, SeedableRng}; @@ -46,6 +50,6 @@ fn empty_listener_base(trigger: Trigger) { black_box(trigger); } -fn send_base_event(world: &mut World, entities: &Vec) { +fn send_base_event(world: &mut World, entities: impl TriggerTargets) { world.trigger_targets(EventBase, entities); } diff --git a/benches/benches/bevy_ecs/scheduling/schedule.rs b/benches/benches/bevy_ecs/scheduling/schedule.rs index 0450428535..9844461d39 100644 --- a/benches/benches/bevy_ecs/scheduling/schedule.rs +++ b/benches/benches/bevy_ecs/scheduling/schedule.rs @@ -79,7 +79,7 @@ pub fn build_schedule(criterion: &mut Criterion) { // Benchmark graphs of different sizes. for graph_size in [100, 500, 1000] { // Basic benchmark without constraints. - group.bench_function(format!("{graph_size}_schedule_noconstraints"), |bencher| { + group.bench_function(format!("{graph_size}_schedule_no_constraints"), |bencher| { bencher.iter(|| { let mut app = App::new(); for _ in 0..graph_size { diff --git a/benches/benches/bevy_ecs/world/commands.rs b/benches/benches/bevy_ecs/world/commands.rs index 6ff63b2e20..8ad87862eb 100644 --- a/benches/benches/bevy_ecs/world/commands.rs +++ b/benches/benches/bevy_ecs/world/commands.rs @@ -106,6 +106,10 @@ pub fn insert_commands(criterion: &mut Criterion) { for entity in &entities { values.push((*entity, (Matrix::default(), Vec3::default()))); } + #[expect( + deprecated, + reason = "This needs to be supported for now, and therefore still needs the benchmark." 
+ )] commands.insert_or_spawn_batch(values); command_queue.apply(&mut world); }); diff --git a/benches/benches/bevy_ecs/world/despawn.rs b/benches/benches/bevy_ecs/world/despawn.rs index ace88e744a..5419867a9e 100644 --- a/benches/benches/bevy_ecs/world/despawn.rs +++ b/benches/benches/bevy_ecs/world/despawn.rs @@ -1,5 +1,5 @@ use bevy_ecs::prelude::*; -use criterion::Criterion; +use criterion::{BatchSize, Criterion}; use glam::*; #[derive(Component)] @@ -13,18 +13,23 @@ pub fn world_despawn(criterion: &mut Criterion) { group.measurement_time(core::time::Duration::from_secs(4)); for entity_count in (0..5).map(|i| 10_u32.pow(i)) { - let mut world = World::default(); - for _ in 0..entity_count { - world.spawn((A(Mat4::default()), B(Vec4::default()))); - } - - let ents = world.iter_entities().map(|e| e.id()).collect::>(); group.bench_function(format!("{}_entities", entity_count), |bencher| { - bencher.iter(|| { - ents.iter().for_each(|e| { - world.despawn(*e); - }); - }); + bencher.iter_batched_ref( + || { + let mut world = World::default(); + for _ in 0..entity_count { + world.spawn((A(Mat4::default()), B(Vec4::default()))); + } + let ents = world.iter_entities().map(|e| e.id()).collect::>(); + (world, ents) + }, + |(world, ents)| { + ents.iter().for_each(|e| { + world.despawn(*e); + }); + }, + BatchSize::SmallInput, + ); }); } diff --git a/benches/benches/bevy_ecs/world/despawn_recursive.rs b/benches/benches/bevy_ecs/world/despawn_recursive.rs index dd1ca4325b..6ae59b10a5 100644 --- a/benches/benches/bevy_ecs/world/despawn_recursive.rs +++ b/benches/benches/bevy_ecs/world/despawn_recursive.rs @@ -1,5 +1,5 @@ use bevy_ecs::prelude::*; -use criterion::Criterion; +use criterion::{BatchSize, Criterion}; use glam::*; #[derive(Component)] @@ -13,22 +13,30 @@ pub fn world_despawn_recursive(criterion: &mut Criterion) { group.measurement_time(core::time::Duration::from_secs(4)); for entity_count in (0..5).map(|i| 10_u32.pow(i)) { - let mut world = World::default(); - for _ in 0..entity_count { - world - .spawn((A(Mat4::default()), B(Vec4::default()))) - .with_children(|parent| { - parent.spawn((A(Mat4::default()), B(Vec4::default()))); - }); - } - - let ents = world.iter_entities().map(|e| e.id()).collect::>(); group.bench_function(format!("{}_entities", entity_count), |bencher| { - bencher.iter(|| { - ents.iter().for_each(|e| { - world.entity_mut(*e).despawn(); - }); - }); + bencher.iter_batched_ref( + || { + let mut world = World::default(); + let parent_ents = (0..entity_count) + .map(|_| { + world + .spawn((A(Mat4::default()), B(Vec4::default()))) + .with_children(|parent| { + parent.spawn((A(Mat4::default()), B(Vec4::default()))); + }) + .id() + }) + .collect::>(); + + (world, parent_ents) + }, + |(world, parent_ents)| { + parent_ents.iter().for_each(|e| { + world.despawn(*e); + }); + }, + BatchSize::SmallInput, + ); }); } diff --git a/benches/benches/bevy_ecs/world/entity_hash.rs b/benches/benches/bevy_ecs/world/entity_hash.rs index 5e92443bf1..7e8dfb4a21 100644 --- a/benches/benches/bevy_ecs/world/entity_hash.rs +++ b/benches/benches/bevy_ecs/world/entity_hash.rs @@ -1,4 +1,4 @@ -use bevy_ecs::entity::{hash_set::EntityHashSet, Entity}; +use bevy_ecs::entity::{Entity, EntityHashSet}; use criterion::{BenchmarkId, Criterion, Throughput}; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; @@ -11,16 +11,16 @@ fn make_entity(rng: &mut impl Rng, size: usize) -> Entity { // * For ids, half are in [0, size), half are unboundedly larger. 
// * For generations, half are in [1, 3), half are unboundedly larger. - let x: f64 = rng.gen(); + let x: f64 = rng.r#gen(); let id = -(1.0 - x).log2() * (size as f64); - let x: f64 = rng.gen(); - let gen = 1.0 + -(1.0 - x).log2() * 2.0; + let x: f64 = rng.r#gen(); + let generation = 1.0 + -(1.0 - x).log2() * 2.0; // this is not reliable, but we're internal so a hack is ok - let bits = ((gen as u64) << 32) | (id as u64); + let bits = ((generation as u64) << 32) | (id as u64); let e = Entity::from_bits(bits); assert_eq!(e.index(), id as u32); - assert_eq!(e.generation(), gen as u32); + assert_eq!(e.generation(), generation as u32); e } diff --git a/benches/benches/bevy_math/bezier.rs b/benches/benches/bevy_math/bezier.rs index 27affcaa71..a95cb4a821 100644 --- a/benches/benches/bevy_math/bezier.rs +++ b/benches/benches/bevy_math/bezier.rs @@ -8,7 +8,10 @@ use criterion::{ criterion_group!(benches, segment_ease, curve_position, curve_iter_positions); fn segment_ease(c: &mut Criterion) { - let segment = black_box(CubicSegment::new_bezier(vec2(0.25, 0.1), vec2(0.25, 1.0))); + let segment = black_box(CubicSegment::new_bezier_easing( + vec2(0.25, 0.1), + vec2(0.25, 1.0), + )); c.bench_function(bench!("segment_ease"), |b| { let mut t = 0; diff --git a/benches/benches/bevy_reflect/list.rs b/benches/benches/bevy_reflect/list.rs index 872c2dd0cb..fcbe59accd 100644 --- a/benches/benches/bevy_reflect/list.rs +++ b/benches/benches/bevy_reflect/list.rs @@ -10,7 +10,7 @@ use criterion::{ criterion_group!( benches, concrete_list_apply, - concrete_list_clone_dynamic, + concrete_list_to_dynamic_list, dynamic_list_apply, dynamic_list_push ); @@ -75,26 +75,26 @@ fn concrete_list_apply(criterion: &mut Criterion) { let mut group = create_group(criterion, bench!("concrete_list_apply")); let empty_base = |_: usize| Vec::::new; - let full_base = |size: usize| move || iter::repeat(0).take(size).collect::>(); - let patch = |size: usize| iter::repeat(1).take(size).collect::>(); + let full_base = |size: usize| move || iter::repeat_n(0, size).collect::>(); + let patch = |size: usize| iter::repeat_n(1, size).collect::>(); list_apply(&mut group, "empty_base_concrete_patch", empty_base, patch); list_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); list_apply(&mut group, "same_len_concrete_patch", full_base, patch); list_apply(&mut group, "same_len_dynamic_patch", full_base, |size| { - patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); group.finish(); } -fn concrete_list_clone_dynamic(criterion: &mut Criterion) { - let mut group = create_group(criterion, bench!("concrete_list_clone_dynamic")); +fn concrete_list_to_dynamic_list(criterion: &mut Criterion) { + let mut group = create_group(criterion, bench!("concrete_list_to_dynamic_list")); for size in SIZES { group.throughput(Throughput::Elements(size as u64)); @@ -103,9 +103,9 @@ fn concrete_list_clone_dynamic(criterion: &mut Criterion) { BenchmarkId::from_parameter(size), &size, |bencher, &size| { - let v = iter::repeat(0).take(size).collect::>(); + let v = iter::repeat_n(0, size).collect::>(); - bencher.iter(|| black_box(&v).clone_dynamic()); + bencher.iter(|| black_box(&v).to_dynamic_list()); }, ); } @@ -123,11 +123,11 @@ fn dynamic_list_push(criterion: &mut Criterion) { BenchmarkId::from_parameter(size), &size, |bencher, &size| { - let src = iter::repeat(()).take(size).collect::>(); + let src = iter::repeat_n((), size).collect::>(); let dst = DynamicList::default(); 
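The reflection renames running through these benches (`clone_dynamic` becoming `to_dynamic_list`, `to_dynamic_map`, and `to_dynamic_struct`) all follow the same shape; a minimal sketch of the list case, using only the calls that already appear in this diff:

```rust
use bevy_reflect::{DynamicList, List};

fn list_to_dynamic() {
    let concrete: Vec<u64> = vec![1, 2, 3];
    // Previously `concrete.clone_dynamic()`; the dynamic copy can then be used
    // as a patch via `apply`, as the benchmarks above do.
    let dynamic: DynamicList = concrete.to_dynamic_list();
    assert_eq!(dynamic.len(), 3);
}
```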
bencher.iter_batched( - || (src.clone(), dst.clone_dynamic()), + || (src.clone(), dst.to_dynamic_list()), |(src, mut dst)| { for item in src { dst.push(item); @@ -145,20 +145,20 @@ fn dynamic_list_push(criterion: &mut Criterion) { fn dynamic_list_apply(criterion: &mut Criterion) { let mut group = create_group(criterion, bench!("dynamic_list_apply")); - let empty_base = |_: usize| || Vec::::new().clone_dynamic(); - let full_base = |size: usize| move || iter::repeat(0).take(size).collect::>(); - let patch = |size: usize| iter::repeat(1).take(size).collect::>(); + let empty_base = |_: usize| || Vec::::new().to_dynamic_list(); + let full_base = |size: usize| move || iter::repeat_n(0, size).collect::>(); + let patch = |size: usize| iter::repeat_n(1, size).collect::>(); list_apply(&mut group, "empty_base_concrete_patch", empty_base, patch); list_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); list_apply(&mut group, "same_len_concrete_patch", full_base, patch); list_apply(&mut group, "same_len_dynamic_patch", full_base, |size| { - patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); group.finish(); diff --git a/benches/benches/bevy_reflect/map.rs b/benches/benches/bevy_reflect/map.rs index 4525ed8598..1eab01a587 100644 --- a/benches/benches/bevy_reflect/map.rs +++ b/benches/benches/bevy_reflect/map.rs @@ -1,7 +1,7 @@ use core::{fmt::Write, hint::black_box, iter, time::Duration}; use benches::bench; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::{DynamicMap, Map}; use criterion::{ criterion_group, measurement::Measurement, AxisScale, BatchSize, BenchmarkGroup, BenchmarkId, @@ -108,7 +108,7 @@ fn concrete_map_apply(criterion: &mut Criterion) { ); map_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - key_range_patch(size).clone_dynamic() + key_range_patch(size).to_dynamic_map() }); map_apply( @@ -122,7 +122,7 @@ fn concrete_map_apply(criterion: &mut Criterion) { &mut group, "same_keys_dynamic_patch", key_range_base, - |size| key_range_patch(size).clone_dynamic(), + |size| key_range_patch(size).to_dynamic_map(), ); map_apply( @@ -136,7 +136,7 @@ fn concrete_map_apply(criterion: &mut Criterion) { &mut group, "disjoint_keys_dynamic_patch", key_range_base, - |size| disjoint_patch(size).clone_dynamic(), + |size| disjoint_patch(size).to_dynamic_map(), ); } @@ -145,7 +145,7 @@ fn u64_to_n_byte_key(k: u64, n: usize) -> String { write!(&mut key, "{}", k).unwrap(); // Pad key to n bytes. 
- key.extend(iter::repeat('\0').take(n - key.len())); + key.extend(iter::repeat_n('\0', n - key.len())); key } @@ -159,7 +159,7 @@ fn dynamic_map_apply(criterion: &mut Criterion) { (0..size as u64) .zip(iter::repeat(0)) .collect::>() - .clone_dynamic() + .to_dynamic_map() } }; @@ -183,7 +183,7 @@ fn dynamic_map_apply(criterion: &mut Criterion) { ); map_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - key_range_patch(size).clone_dynamic() + key_range_patch(size).to_dynamic_map() }); map_apply( @@ -197,7 +197,7 @@ fn dynamic_map_apply(criterion: &mut Criterion) { &mut group, "same_keys_dynamic_patch", key_range_base, - |size| key_range_patch(size).clone_dynamic(), + |size| key_range_patch(size).to_dynamic_map(), ); map_apply( @@ -211,7 +211,7 @@ fn dynamic_map_apply(criterion: &mut Criterion) { &mut group, "disjoint_keys_dynamic_patch", key_range_base, - |size| disjoint_patch(size).clone_dynamic(), + |size| disjoint_patch(size).to_dynamic_map(), ); } diff --git a/benches/benches/bevy_reflect/struct.rs b/benches/benches/bevy_reflect/struct.rs index d8f25554c3..7750213b6d 100644 --- a/benches/benches/bevy_reflect/struct.rs +++ b/benches/benches/bevy_reflect/struct.rs @@ -12,8 +12,8 @@ criterion_group!( concrete_struct_apply, concrete_struct_field, concrete_struct_type_info, - concrete_struct_clone, - dynamic_struct_clone, + concrete_struct_to_dynamic_struct, + dynamic_struct_to_dynamic_struct, dynamic_struct_apply, dynamic_struct_get_field, dynamic_struct_insert, @@ -113,7 +113,7 @@ fn concrete_struct_apply(criterion: &mut Criterion) { bencher.iter_batched( || { let (obj, _) = input(); - let patch = obj.clone_dynamic(); + let patch = obj.to_dynamic_struct(); (obj, patch) }, |(mut obj, patch)| obj.apply(black_box(&patch)), @@ -170,8 +170,8 @@ fn concrete_struct_type_info(criterion: &mut Criterion) { } } -fn concrete_struct_clone(criterion: &mut Criterion) { - let mut group = create_group(criterion, bench!("concrete_struct_clone")); +fn concrete_struct_to_dynamic_struct(criterion: &mut Criterion) { + let mut group = create_group(criterion, bench!("concrete_struct_to_dynamic_struct")); let structs: [(Box, Box); 5] = [ ( @@ -203,28 +203,28 @@ fn concrete_struct_clone(criterion: &mut Criterion) { BenchmarkId::new("NonGeneric", field_count), &standard, |bencher, s| { - bencher.iter(|| s.clone_dynamic()); + bencher.iter(|| s.to_dynamic_struct()); }, ); group.bench_with_input( BenchmarkId::new("Generic", field_count), &generic, |bencher, s| { - bencher.iter(|| s.clone_dynamic()); + bencher.iter(|| s.to_dynamic_struct()); }, ); } } -fn dynamic_struct_clone(criterion: &mut Criterion) { - let mut group = create_group(criterion, bench!("dynamic_struct_clone")); +fn dynamic_struct_to_dynamic_struct(criterion: &mut Criterion) { + let mut group = create_group(criterion, bench!("dynamic_struct_to_dynamic_struct")); let structs: [Box; 5] = [ - Box::new(Struct1::default().clone_dynamic()), - Box::new(Struct16::default().clone_dynamic()), - Box::new(Struct32::default().clone_dynamic()), - Box::new(Struct64::default().clone_dynamic()), - Box::new(Struct128::default().clone_dynamic()), + Box::new(Struct1::default().to_dynamic_struct()), + Box::new(Struct16::default().to_dynamic_struct()), + Box::new(Struct32::default().to_dynamic_struct()), + Box::new(Struct64::default().to_dynamic_struct()), + Box::new(Struct128::default().to_dynamic_struct()), ]; for s in structs { @@ -234,7 +234,7 @@ fn dynamic_struct_clone(criterion: &mut Criterion) { BenchmarkId::from_parameter(field_count), &s, |bencher, 
s| { - bencher.iter(|| s.clone_dynamic()); + bencher.iter(|| s.to_dynamic_struct()); }, ); } @@ -265,7 +265,7 @@ fn dynamic_struct_apply(criterion: &mut Criterion) { &patch, |bencher, patch| { bencher.iter_batched( - || (base.clone_dynamic(), patch()), + || (base.to_dynamic_struct(), patch()), |(mut base, patch)| base.apply(black_box(&*patch)), BatchSize::SmallInput, ); @@ -289,7 +289,7 @@ fn dynamic_struct_apply(criterion: &mut Criterion) { } bencher.iter_batched( - || base.clone_dynamic(), + || base.to_dynamic_struct(), |mut base| base.apply(black_box(&patch)), BatchSize::SmallInput, ); @@ -315,7 +315,7 @@ fn dynamic_struct_insert(criterion: &mut Criterion) { let field = format!("field_{}", field_count); bencher.iter_batched( - || s.clone_dynamic(), + || s.to_dynamic_struct(), |mut s| { s.insert(black_box(&field), ()); }, diff --git a/benches/benches/bevy_render/compute_normals.rs b/benches/benches/bevy_render/compute_normals.rs new file mode 100644 index 0000000000..41bda05de9 --- /dev/null +++ b/benches/benches/bevy_render/compute_normals.rs @@ -0,0 +1,96 @@ +use core::hint::black_box; + +use criterion::{criterion_group, Criterion}; +use rand::random; +use std::time::{Duration, Instant}; + +use bevy_render::{ + mesh::{Indices, Mesh, PrimitiveTopology}, + render_asset::RenderAssetUsages, +}; + +const GRID_SIZE: usize = 256; + +fn compute_normals(c: &mut Criterion) { + let indices = Indices::U32( + (0..GRID_SIZE - 1) + .flat_map(|i| std::iter::repeat(i).zip(0..GRID_SIZE - 1)) + .flat_map(|(i, j)| { + let tl = ((GRID_SIZE * j) + i) as u32; + let tr = tl + 1; + let bl = ((GRID_SIZE * (j + 1)) + i) as u32; + let br = bl + 1; + [tl, bl, tr, tr, bl, br] + }) + .collect(), + ); + + let new_mesh = || { + let positions = (0..GRID_SIZE) + .flat_map(|i| std::iter::repeat(i).zip(0..GRID_SIZE)) + .map(|(i, j)| [i as f32, j as f32, random::()]) + .collect::>(); + Mesh::new( + PrimitiveTopology::TriangleList, + RenderAssetUsages::MAIN_WORLD, + ) + .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions) + .with_inserted_indices(indices.clone()) + }; + + c.bench_function("smooth_normals", |b| { + b.iter_custom(|iters| { + let mut total = Duration::default(); + for _ in 0..iters { + let mut mesh = new_mesh(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + let start = Instant::now(); + mesh.compute_smooth_normals(); + let end = Instant::now(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + total += end.duration_since(start); + } + total + }); + }); + + c.bench_function("face_weighted_normals", |b| { + b.iter_custom(|iters| { + let mut total = Duration::default(); + for _ in 0..iters { + let mut mesh = new_mesh(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + let start = Instant::now(); + mesh.compute_smooth_normals(); + let end = Instant::now(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + total += end.duration_since(start); + } + total + }); + }); + + let new_mesh = || { + new_mesh() + .with_duplicated_vertices() + .with_computed_flat_normals() + }; + + c.bench_function("flat_normals", |b| { + b.iter_custom(|iters| { + let mut total = Duration::default(); + for _ in 0..iters { + let mut mesh = new_mesh(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + let start = Instant::now(); + mesh.compute_flat_normals(); + let end = Instant::now(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + total += end.duration_since(start); + } + total + }); + }); +} + +criterion_group!(benches, compute_normals); diff --git a/benches/benches/bevy_render/main.rs 
b/benches/benches/bevy_render/main.rs index 7a369bc905..e335670222 100644 --- a/benches/benches/bevy_render/main.rs +++ b/benches/benches/bevy_render/main.rs @@ -1,6 +1,11 @@ use criterion::criterion_main; +mod compute_normals; mod render_layers; mod torus; -criterion_main!(render_layers::benches, torus::benches); +criterion_main!( + render_layers::benches, + compute_normals::benches, + torus::benches +); diff --git a/clippy.toml b/clippy.toml index 26b39b4e84..2c98e8ed02 100644 --- a/clippy.toml +++ b/clippy.toml @@ -43,3 +43,6 @@ disallowed-methods = [ { path = "f32::atanh", reason = "use bevy_math::ops::atanh instead for libm determinism" }, { path = "criterion::black_box", reason = "use core::hint::black_box instead" }, ] + +# Require `bevy_ecs::children!` to use `[]` braces, instead of `()` or `{}`. +standard-macro-braces = [{ name = "children", brace = "[" }] diff --git a/crates/bevy_a11y/Cargo.toml b/crates/bevy_a11y/Cargo.toml index 0037ccf29d..759cf3e787 100644 --- a/crates/bevy_a11y/Cargo.toml +++ b/crates/bevy_a11y/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_a11y" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides accessibility support for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -18,28 +18,17 @@ bevy_reflect = [ "dep:bevy_reflect", "bevy_app/bevy_reflect", "bevy_ecs/bevy_reflect", - "bevy_input_focus/bevy_reflect", ] ## Adds serialization support through `serde`. -serialize = [ - "dep:serde", - "bevy_ecs/serialize", - "bevy_input_focus/serialize", - "accesskit/serde", -] +serialize = ["dep:serde", "bevy_ecs/serialize", "accesskit/serde"] # Platform Compatibility ## Allows access to the `std` crate. Enabling this feature will prevent compilation ## on `no_std` targets, but provides access to certain additional features on ## supported platforms. -std = [ - "bevy_app/std", - "bevy_ecs/std", - "bevy_reflect/std", - "bevy_input_focus/std", -] +std = ["bevy_app/std", "bevy_ecs/std", "bevy_reflect/std"] ## `critical-section` provides the building blocks for synchronization primitives ## on all platforms, including `no_std`. @@ -47,31 +36,17 @@ critical-section = [ "bevy_app/critical-section", "bevy_ecs/critical-section", "bevy_reflect?/critical-section", - "bevy_input_focus/critical-section", ] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "bevy_app/portable-atomic", - "bevy_ecs/portable-atomic", - "bevy_reflect?/portable-atomic", - "bevy_input_focus/portable-atomic", -] - -## Uses the `libm` maths library instead of the one provided in `std` and `core`. 
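Relating to the `clippy.toml` addition above, which pins `bevy_ecs::children!` to square brackets via `standard-macro-braces`: a small sketch of the call style the lint enforces. The component and system here are illustrative only.

```rust
use bevy_ecs::{children, prelude::*};

#[derive(Component)]
struct Label(&'static str);

fn spawn_menu(mut commands: Commands) {
    // `children![..]` is the accepted form; `children!(..)` or `children! {..}` would be flagged.
    commands.spawn((
        Label("menu"),
        children![Label("play"), Label("quit")],
    ));
}
```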
-libm = ["bevy_input_focus/libm"] - [dependencies] # bevy bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false } bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } -bevy_input_focus = { path = "../bevy_input_focus", version = "0.16.0-dev", default-features = false } # other -accesskit = { version = "0.17", default-features = false } +accesskit = { version = "0.18", default-features = false } serde = { version = "1", default-features = false, features = [ "alloc", ], optional = true } diff --git a/crates/bevy_a11y/src/lib.rs b/crates/bevy_a11y/src/lib.rs index ccc7edf536..910ec3ca35 100644 --- a/crates/bevy_a11y/src/lib.rs +++ b/crates/bevy_a11y/src/lib.rs @@ -54,7 +54,11 @@ pub struct ActionRequest(pub accesskit::ActionRequest); /// Useful if a third-party plugin needs to conditionally integrate with /// `AccessKit` #[derive(Resource, Default, Clone, Debug, Deref, DerefMut)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Default, Resource))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Default, Clone, Resource) +)] pub struct AccessibilityRequested(Arc); impl AccessibilityRequested { @@ -78,7 +82,11 @@ impl AccessibilityRequested { /// will generate conflicting updates. #[derive(Resource, Clone, Debug, Deref, DerefMut)] #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Resource))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Resource, Clone, Default) +)] #[cfg_attr( all(feature = "bevy_reflect", feature = "serialize"), reflect(Serialize, Deserialize) @@ -127,7 +135,7 @@ impl From for AccessibilityNode { #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr( all(feature = "bevy_reflect", feature = "serialize"), - reflect(Serialize, Deserialize) + reflect(Serialize, Deserialize, Clone) )] pub enum AccessibilitySystem { /// Update the accessibility tree diff --git a/crates/bevy_animation/Cargo.toml b/crates/bevy_animation/Cargo.toml index 5b906fed7c..11e819806c 100644 --- a/crates/bevy_animation/Cargo.toml +++ b/crates/bevy_animation/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_animation" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides animation functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -16,8 +16,8 @@ bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } bevy_log = { path = "../bevy_log", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_mesh = { path = "../bevy_mesh", version = "0.16.0-dev" } bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", "petgraph", ] } bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } @@ -25,13 +25,13 @@ bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ 
+bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", "serialize", ] } # other -petgraph = { version = "0.6", features = ["serde-1"] } +petgraph = { version = "0.7", features = ["serde-1"] } ron = "0.8" serde = "1" blake3 = { version = "1.0" } @@ -45,6 +45,7 @@ smallvec = "1" tracing = { version = "0.1", default-features = false, features = ["std"] } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. uuid = { version = "1.13.1", default-features = false, features = ["js"] } [lints] diff --git a/crates/bevy_animation/src/animation_curves.rs b/crates/bevy_animation/src/animation_curves.rs index 4c944cdf6d..45fa393e05 100644 --- a/crates/bevy_animation/src/animation_curves.rs +++ b/crates/bevy_animation/src/animation_curves.rs @@ -100,43 +100,48 @@ use bevy_math::curve::{ iterable::IterableCurve, Curve, Interval, }; -use bevy_platform_support::hash::Hashed; +use bevy_mesh::morph::MorphWeights; +use bevy_platform::hash::Hashed; use bevy_reflect::{FromReflect, Reflect, Reflectable, TypeInfo, Typed}; -use bevy_render::mesh::morph::MorphWeights; use downcast_rs::{impl_downcast, Downcast}; -/// A value on a component that Bevy can animate. +/// A trait for exposing a value in an entity so that it can be animated. /// -/// You can implement this trait on a unit struct in order to support animating -/// custom components other than transforms and morph weights. Use that type in -/// conjunction with [`AnimatableCurve`] (and perhaps [`AnimatableKeyframeCurve`] -/// to define the animation itself). -/// For example, in order to animate field of view, you might use: +/// `AnimatableProperty` allows any value contained in an entity to be animated +/// as long as it can be obtained by mutable reference. This makes it more +/// flexible than [`animated_field`]. +/// +/// [`animated_field`]: crate::animated_field +/// +/// Here, `AnimatableProperty` is used to animate a value inside an `Option`, +/// returning an error if the option is `None`. /// /// # use bevy_animation::{prelude::AnimatableProperty, AnimationEntityMut, AnimationEvaluationError, animation_curves::EvaluatorId}; -/// # use bevy_reflect::Reflect; +/// # use bevy_ecs::component::Component; /// # use std::any::TypeId; -/// # use bevy_render::camera::{Projection, PerspectiveProjection}; -/// #[derive(Reflect)] -/// struct FieldOfViewProperty; +/// #[derive(Component)] +/// struct ExampleComponent { +/// power_level: Option +/// } /// -/// impl AnimatableProperty for FieldOfViewProperty { +/// #[derive(Clone)] +/// struct PowerLevelProperty; +/// +/// impl AnimatableProperty for PowerLevelProperty { /// type Property = f32; -/// fn get_mut<'a>(&self, entity: &'a mut AnimationEntityMut) -> Result<&'a mut Self::Property, AnimationEvaluationError> { +/// fn get_mut<'a>( +/// &self, +/// entity: &'a mut AnimationEntityMut +/// ) -> Result<&'a mut Self::Property, AnimationEvaluationError> { /// let component = entity -/// .get_mut::() -/// .ok_or(AnimationEvaluationError::ComponentNotPresent(TypeId::of::< -/// Projection, -/// >( -/// )))? +/// .get_mut::() +/// .ok_or(AnimationEvaluationError::ComponentNotPresent( +/// TypeId::of::() +/// ))? 
/// .into_inner(); -/// match component { -/// Projection::Perspective(perspective) => Ok(&mut perspective.fov), -/// _ => Err(AnimationEvaluationError::PropertyNotPresent(TypeId::of::< -/// PerspectiveProjection, -/// >( -/// ))), -/// } +/// component.power_level.as_mut().ok_or(AnimationEvaluationError::PropertyNotPresent( +/// TypeId::of::>() +/// )) /// } /// /// fn evaluator_id(&self) -> EvaluatorId { @@ -144,58 +149,44 @@ use downcast_rs::{impl_downcast, Downcast}; /// } /// } /// -/// You can then create an [`AnimationClip`] to animate this property like so: /// -/// # use bevy_animation::{AnimationClip, AnimationTargetId, VariableCurve, AnimationEntityMut, AnimationEvaluationError, animation_curves::EvaluatorId}; +/// You can then create an [`AnimatableCurve`] to animate this property like so: +/// +/// # use bevy_animation::{VariableCurve, AnimationEntityMut, AnimationEvaluationError, animation_curves::EvaluatorId}; /// # use bevy_animation::prelude::{AnimatableProperty, AnimatableKeyframeCurve, AnimatableCurve}; -/// # use bevy_ecs::name::Name; -/// # use bevy_reflect::Reflect; -/// # use bevy_render::camera::{Projection, PerspectiveProjection}; +/// # use bevy_ecs::{name::Name, component::Component}; /// # use std::any::TypeId; -/// # let animation_target_id = AnimationTargetId::from(&Name::new("Test")); -/// # #[derive(Reflect, Clone)] -/// # struct FieldOfViewProperty; -/// # impl AnimatableProperty for FieldOfViewProperty { -/// # type Property = f32; -/// # fn get_mut<'a>(&self, entity: &'a mut AnimationEntityMut) -> Result<&'a mut Self::Property, AnimationEvaluationError> { -/// # let component = entity -/// # .get_mut::() -/// # .ok_or(AnimationEvaluationError::ComponentNotPresent(TypeId::of::< -/// # Projection, -/// # >( -/// # )))? -/// # .into_inner(); -/// # match component { -/// # Projection::Perspective(perspective) => Ok(&mut perspective.fov), -/// # _ => Err(AnimationEvaluationError::PropertyNotPresent(TypeId::of::< -/// # PerspectiveProjection, -/// # >( -/// # ))), -/// # } -/// # } -/// # fn evaluator_id(&self) -> EvaluatorId { -/// # EvaluatorId::Type(TypeId::of::()) -/// # } +/// # #[derive(Component)] +/// # struct ExampleComponent { power_level: Option } +/// # #[derive(Clone)] +/// # struct PowerLevelProperty; +/// # impl AnimatableProperty for PowerLevelProperty { +/// # type Property = f32; +/// # fn get_mut<'a>( +/// # &self, +/// # entity: &'a mut AnimationEntityMut +/// # ) -> Result<&'a mut Self::Property, AnimationEvaluationError> { +/// # let component = entity +/// # .get_mut::() +/// # .ok_or(AnimationEvaluationError::ComponentNotPresent( +/// # TypeId::of::() +/// # ))? 
+/// # .into_inner(); +/// # component.power_level.as_mut().ok_or(AnimationEvaluationError::PropertyNotPresent( +/// # TypeId::of::>() +/// # )) +/// # } +/// # fn evaluator_id(&self) -> EvaluatorId { +/// # EvaluatorId::Type(TypeId::of::()) +/// # } /// # } -/// let mut animation_clip = AnimationClip::default(); -/// animation_clip.add_curve_to_target( -/// animation_target_id, -/// AnimatableCurve::new( -/// FieldOfViewProperty, -/// AnimatableKeyframeCurve::new([ -/// (0.0, core::f32::consts::PI / 4.0), -/// (1.0, core::f32::consts::PI / 3.0), -/// ]).expect("Failed to create font size curve") -/// ) +/// AnimatableCurve::new( +/// PowerLevelProperty, +/// AnimatableKeyframeCurve::new([ +/// (0.0, 0.0), +/// (1.0, 9001.0), +/// ]).expect("Failed to create power level curve") /// ); -/// -/// Here, the use of [`AnimatableKeyframeCurve`] creates a curve out of the given keyframe time-value -/// pairs, using the [`Animatable`] implementation of `f32` to interpolate between them. The -/// invocation of [`AnimatableCurve::new`] with `FieldOfViewProperty` indicates that the `f32` -/// output from that curve is to be used to animate the font size of a `PerspectiveProjection` component (as -/// configured above). -/// -/// [`AnimationClip`]: crate::AnimationClip pub trait AnimatableProperty: Send + Sync + 'static { /// The animated property type. type Property: Animatable; diff --git a/crates/bevy_animation/src/gltf_curves.rs b/crates/bevy_animation/src/gltf_curves.rs index d5b2cbb6b9..688011a32c 100644 --- a/crates/bevy_animation/src/gltf_curves.rs +++ b/crates/bevy_animation/src/gltf_curves.rs @@ -111,6 +111,7 @@ impl CubicKeyframeCurve { /// A keyframe-defined curve that uses cubic spline interpolation, special-cased for quaternions /// since it uses `Vec4` internally. #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct CubicRotationCurve { // Note: The sample width here should be 3. core: ChunkedUnevenCore, @@ -372,8 +373,9 @@ impl WideCubicKeyframeCurve { /// recommended to use its implementation of the [`IterableCurve`] trait, which allows iterating /// directly over information derived from the curve without allocating. /// -/// [`MorphWeights`]: bevy_render::prelude::MorphWeights +/// [`MorphWeights`]: bevy_mesh::morph::MorphWeights #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub enum WeightsCurve { /// A curve which takes a constant value over its domain. Notably, this is how animations with /// only a single keyframe are interpreted. diff --git a/crates/bevy_animation/src/graph.rs b/crates/bevy_animation/src/graph.rs index 1729841f02..aa6d252fee 100644 --- a/crates/bevy_animation/src/graph.rs +++ b/crates/bevy_animation/src/graph.rs @@ -17,7 +17,7 @@ use bevy_ecs::{ resource::Resource, system::{Res, ResMut}, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::{prelude::ReflectDefault, Reflect, ReflectSerialize}; use derive_more::derive::From; use petgraph::{ @@ -108,7 +108,7 @@ use crate::{AnimationClip, AnimationTargetId}; /// /// [RFC 51]: https://github.com/bevyengine/rfcs/blob/main/rfcs/51-animation-composition.md #[derive(Asset, Reflect, Clone, Debug, Serialize)] -#[reflect(Serialize, Debug)] +#[reflect(Serialize, Debug, Clone)] #[serde(into = "SerializedAnimationGraph")] pub struct AnimationGraph { /// The `petgraph` data structure that defines the animation graph. 
@@ -131,7 +131,7 @@ pub struct AnimationGraph { /// A [`Handle`] to the [`AnimationGraph`] to be used by the [`AnimationPlayer`](crate::AnimationPlayer) on the same entity. #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AnimationGraphHandle(pub Handle); impl From for AssetId { @@ -164,6 +164,7 @@ pub type AnimationNodeIndex = NodeIndex; /// of the graph, contain animation clips to play. Blend and add nodes describe /// how to combine their children to produce a final animation. #[derive(Clone, Reflect, Debug)] +#[reflect(Clone)] pub struct AnimationGraphNode { /// Animation node data specific to the type of node (clip, blend, or add). /// @@ -205,6 +206,7 @@ pub struct AnimationGraphNode { /// In the case of clip nodes, this contains the actual animation clip /// associated with the node. #[derive(Clone, Default, Reflect, Debug)] +#[reflect(Clone)] pub enum AnimationNodeType { /// A *clip node*, which plays an animation clip. /// @@ -884,10 +886,10 @@ impl ThreadedAnimationGraph { self.sorted_edge_ranges.clear(); self.sorted_edge_ranges - .extend(iter::repeat(0..0).take(node_count)); + .extend(iter::repeat_n(0..0, node_count)); self.computed_masks.clear(); - self.computed_masks.extend(iter::repeat(0).take(node_count)); + self.computed_masks.extend(iter::repeat_n(0, node_count)); } /// Recursively constructs the [`ThreadedAnimationGraph`] for the subtree diff --git a/crates/bevy_animation/src/lib.rs b/crates/bevy_animation/src/lib.rs index 8663ea3f3f..43ea343aa3 100644 --- a/crates/bevy_animation/src/lib.rs +++ b/crates/bevy_animation/src/lib.rs @@ -35,7 +35,7 @@ use bevy_app::{Animation, App, Plugin, PostUpdate}; use bevy_asset::{Asset, AssetApp, AssetEvents, Assets}; use bevy_ecs::{prelude::*, world::EntityMutExcept}; use bevy_math::FloatOrd; -use bevy_platform_support::{collections::HashMap, hash::NoOpHash}; +use bevy_platform::{collections::HashMap, hash::NoOpHash}; use bevy_reflect::{prelude::ReflectDefault, Reflect, TypePath}; use bevy_time::Time; use bevy_transform::TransformSystem; @@ -96,23 +96,26 @@ impl VariableCurve { /// Because animation clips refer to targets by UUID, they can target any /// [`AnimationTarget`] with that ID. 
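Since the doc comment above leans on the UUID-based identity of animation targets, a one-line illustration using the `From<&Name>` conversion that the docs earlier in this diff already rely on; the name itself is illustrative.

```rust
use bevy_animation::AnimationTargetId;
use bevy_ecs::name::Name;

fn left_arm_target() -> AnimationTargetId {
    // Deterministic: the same name always yields the same UUID-based id, which is
    // how a clip authored against "LeftArm" finds the entity registered with that id.
    AnimationTargetId::from(&Name::new("LeftArm"))
}
```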
#[derive(Asset, Reflect, Clone, Debug, Default)] +#[reflect(Clone, Default)] pub struct AnimationClip { // This field is ignored by reflection because AnimationCurves can contain things that are not reflect-able - #[reflect(ignore)] + #[reflect(ignore, clone)] curves: AnimationCurves, events: AnimationEvents, duration: f32, } #[derive(Reflect, Debug, Clone)] +#[reflect(Clone)] struct TimedAnimationEvent { time: f32, event: AnimationEvent, } #[derive(Reflect, Debug, Clone)] +#[reflect(Clone)] struct AnimationEvent { - #[reflect(ignore)] + #[reflect(ignore, clone)] trigger: AnimationEventFn, } @@ -124,6 +127,7 @@ impl AnimationEvent { #[derive(Reflect, Clone)] #[reflect(opaque)] +#[reflect(Clone, Default, Debug)] struct AnimationEventFn(Arc); impl Default for AnimationEventFn { @@ -139,6 +143,7 @@ impl Debug for AnimationEventFn { } #[derive(Reflect, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)] +#[reflect(Clone)] enum AnimationEventTarget { Root, Node(AnimationTargetId), @@ -172,6 +177,7 @@ pub type AnimationCurves = HashMap, NoOpHa /// /// [UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Reflect, Debug, Serialize, Deserialize)] +#[reflect(Clone)] pub struct AnimationTargetId(pub Uuid); impl Hash for AnimationTargetId { @@ -203,7 +209,7 @@ impl Hash for AnimationTargetId { /// time. However, you can change [`AnimationTarget`]'s `player` property at /// runtime to change which player is responsible for animating the entity. #[derive(Clone, Copy, Component, Reflect)] -#[reflect(Component)] +#[reflect(Component, Clone)] pub struct AnimationTarget { /// The ID of this animation target. /// @@ -425,6 +431,7 @@ impl AnimationClip { /// Repetition behavior of an animation. #[derive(Reflect, Debug, PartialEq, Eq, Copy, Clone, Default)] +#[reflect(Clone, Default)] pub enum RepeatAnimation { /// The animation will finish after running once. #[default] @@ -462,6 +469,7 @@ pub enum AnimationEvaluationError { /// /// A stopped animation is considered no longer active. #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone, Default)] pub struct ActiveAnimation { /// The factor by which the weight from the [`AnimationGraph`] is multiplied. weight: f32, @@ -674,10 +682,9 @@ impl ActiveAnimation { /// Automatically added to any root animations of a scene when it is /// spawned. #[derive(Component, Default, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AnimationPlayer { active_animations: HashMap, - blend_weights: HashMap, } // This is needed since `#[derive(Clone)]` does not generate optimized `clone_from`. 
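The comment above is the whole justification for the hand-written `Clone` impl that follows; a tiny, Bevy-agnostic illustration of what `clone_from` buys over plain `clone`:

```rust
use std::collections::HashMap;

fn refresh(dst: &mut HashMap<u32, f32>, src: &HashMap<u32, f32>) {
    // Semantically the same as `*dst = src.clone()`, but `clone_from` can reuse
    // `dst`'s existing allocation instead of building a brand-new map first.
    dst.clone_from(src);
}
```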
@@ -685,13 +692,11 @@ impl Clone for AnimationPlayer { fn clone(&self) -> Self { Self { active_animations: self.active_animations.clone(), - blend_weights: self.blend_weights.clone(), } } fn clone_from(&mut self, source: &Self) { self.active_animations.clone_from(&source.active_animations); - self.blend_weights.clone_from(&source.blend_weights); } } @@ -750,10 +755,10 @@ impl AnimationCurveEvaluators { .component_property_curve_evaluators .get_or_insert_with(component_property, func), EvaluatorId::Type(type_id) => match self.type_id_curve_evaluators.entry(type_id) { - bevy_platform_support::collections::hash_map::Entry::Occupied(occupied_entry) => { + bevy_platform::collections::hash_map::Entry::Occupied(occupied_entry) => { &mut **occupied_entry.into_mut() } - bevy_platform_support::collections::hash_map::Entry::Vacant(vacant_entry) => { + bevy_platform::collections::hash_map::Entry::Vacant(vacant_entry) => { &mut **vacant_entry.insert(func()) } }, @@ -1525,6 +1530,8 @@ impl<'a> Iterator for TriggeredEventsIter<'a> { #[cfg(test)] mod tests { + use bevy_reflect::{DynamicMap, Map}; + use super::*; #[derive(Event, Reflect, Clone)] @@ -1656,4 +1663,13 @@ mod tests { active_animation.update(clip.duration, clip.duration); // 0.3 : 0.0 assert_triggered_events_with(&active_animation, &clip, [0.3, 0.2]); } + + #[test] + fn test_animation_node_index_as_key_of_dynamic_map() { + let mut map = DynamicMap::default(); + map.insert_boxed( + Box::new(AnimationNodeIndex::new(0)), + Box::new(ActiveAnimation::default()), + ); + } } diff --git a/crates/bevy_animation/src/transition.rs b/crates/bevy_animation/src/transition.rs index c94378208b..4948559704 100644 --- a/crates/bevy_animation/src/transition.rs +++ b/crates/bevy_animation/src/transition.rs @@ -29,7 +29,7 @@ use crate::{graph::AnimationNodeIndex, ActiveAnimation, AnimationPlayer}; /// component to get confused about which animation is the "main" animation, and /// transitions will usually be incorrect as a result. #[derive(Component, Default, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AnimationTransitions { main_animation: Option, transitions: Vec, @@ -52,6 +52,7 @@ impl Clone for AnimationTransitions { /// An animation that is being faded out as part of a transition #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub struct AnimationTransition { /// The current weight. Starts at 1.0 and goes to 0.0 during the fade-out. current_weight: f32, @@ -117,8 +118,9 @@ pub fn advance_transitions( // is divided between all the other layers, eventually culminating in the // currently-playing animation receiving whatever's left. This results in a // nicely normalized weight. - let mut remaining_weight = 1.0; for (mut animation_transitions, mut player) in query.iter_mut() { + let mut remaining_weight = 1.0; + for transition in &mut animation_transitions.transitions.iter_mut().rev() { // Decrease weight. 
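Moving `remaining_weight` inside the outer loop makes the normalization per player instead of shared across every player returned by the query. A plain-Rust sketch of the distribution the comment above describes, with illustrative numbers:

```rust
// Each transition claims a share of the weight that is still unassigned;
// whatever remains at the end goes to the currently-playing animation.
fn distribute(transition_weights: &[f32]) -> Vec<f32> {
    let mut remaining_weight = 1.0_f32;
    let mut shares = Vec::new();
    for weight in transition_weights {
        let share = remaining_weight * weight;
        shares.push(share);
        remaining_weight -= share;
    }
    shares.push(remaining_weight);
    shares
}

fn main() {
    // Two fading transitions at 50% each, then the main animation.
    assert_eq!(distribute(&[0.5, 0.5]), vec![0.5, 0.25, 0.25]);
}
```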
transition.current_weight = (transition.current_weight diff --git a/crates/bevy_anti_aliasing/Cargo.toml b/crates/bevy_anti_aliasing/Cargo.toml new file mode 100644 index 0000000000..5a8e48ecb5 --- /dev/null +++ b/crates/bevy_anti_aliasing/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "bevy_anti_aliasing" +version = "0.16.0-dev" +edition = "2024" +description = "Provides various anti aliasing implementations for Bevy Engine" +homepage = "https://bevyengine.org" +repository = "https://github.com/bevyengine/bevy" +license = "MIT OR Apache-2.0" +keywords = ["bevy"] + +[features] +trace = [] +webgl = [] +webgpu = [] +smaa_luts = ["bevy_render/ktx2", "bevy_image/ktx2", "bevy_image/zstd"] + +[dependencies] +# bevy +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.16.0-dev" } +bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } + +# other +tracing = { version = "0.1", default-features = false, features = ["std"] } + +[lints] +workspace = true + +[package.metadata.docs.rs] +rustdoc-args = ["-Zunstable-options", "--generate-link-to-definition"] +all-features = true diff --git a/crates/bevy_platform_support/LICENSE-APACHE b/crates/bevy_anti_aliasing/LICENSE-APACHE similarity index 100% rename from crates/bevy_platform_support/LICENSE-APACHE rename to crates/bevy_anti_aliasing/LICENSE-APACHE diff --git a/crates/bevy_platform_support/LICENSE-MIT b/crates/bevy_anti_aliasing/LICENSE-MIT similarity index 100% rename from crates/bevy_platform_support/LICENSE-MIT rename to crates/bevy_anti_aliasing/LICENSE-MIT diff --git a/crates/bevy_anti_aliasing/README.md b/crates/bevy_anti_aliasing/README.md new file mode 100644 index 0000000000..ba0123c31b --- /dev/null +++ b/crates/bevy_anti_aliasing/README.md @@ -0,0 +1,7 @@ +# Bevy Anti Aliasing + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy_core_pipeline.svg)](https://crates.io/crates/bevy_core_pipeline) +[![Downloads](https://img.shields.io/crates/d/bevy_core_pipeline.svg)](https://crates.io/crates/bevy_core_pipeline) +[![Docs](https://docs.rs/bevy_core_pipeline/badge.svg)](https://docs.rs/bevy_core_pipeline/latest/bevy_core_pipeline/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/mod.rs b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/mod.rs similarity index 98% rename from crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/mod.rs rename to crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/mod.rs index 5861084694..a07b5e2239 100644 --- a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/mod.rs +++ b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/mod.rs @@ -1,10 +1,10 @@ -use crate::{ +use 
bevy_app::prelude::*; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_core_pipeline::{ core_2d::graph::{Core2d, Node2d}, core_3d::graph::{Core3d, Node3d}, fullscreen_vertex_shader::fullscreen_shader_vertex_state, }; -use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::{prelude::*, query::QueryItem}; use bevy_image::BevyDefault as _; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -36,7 +36,7 @@ pub use node::CasNode; /// /// To use this, add the [`ContrastAdaptiveSharpening`] component to a 2D or 3D camera. #[derive(Component, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct ContrastAdaptiveSharpening { /// Enable or disable sharpening. pub enabled: bool, @@ -65,7 +65,7 @@ impl Default for ContrastAdaptiveSharpening { } #[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct DenoiseCas(bool); /// The uniform struct extracted from [`ContrastAdaptiveSharpening`] attached to a [`Camera`]. diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/node.rs similarity index 100% rename from crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs rename to crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/node.rs diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl similarity index 100% rename from crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl rename to crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl diff --git a/crates/bevy_anti_aliasing/src/experimental/mod.rs b/crates/bevy_anti_aliasing/src/experimental/mod.rs new file mode 100644 index 0000000000..a8dc522c56 --- /dev/null +++ b/crates/bevy_anti_aliasing/src/experimental/mod.rs @@ -0,0 +1,9 @@ +//! Experimental rendering features. +//! +//! Experimental features are features with known problems, missing features, +//! compatibility issues, low performance, and/or future breaking changes, but +//! are included nonetheless for testing purposes. 
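Per the doc comment above, contrast-adaptive sharpening is opted into per camera. A hypothetical usage sketch; it assumes the crate is a direct dependency and that the relevant plugins are registered through `DefaultPlugins`:

```rust
use bevy::prelude::*;
use bevy_anti_aliasing::contrast_adaptive_sharpening::ContrastAdaptiveSharpening;

fn setup(mut commands: Commands) {
    commands.spawn((
        Camera3d::default(),
        // Only `enabled` is shown in this hunk; the remaining fields fall back
        // to their defaults.
        ContrastAdaptiveSharpening {
            enabled: true,
            ..Default::default()
        },
    ));
}

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_systems(Startup, setup)
        .run();
}
```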
+ +pub mod taa { + pub use crate::taa::{TemporalAntiAliasNode, TemporalAntiAliasPlugin, TemporalAntiAliasing}; +} diff --git a/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl b/crates/bevy_anti_aliasing/src/fxaa/fxaa.wgsl similarity index 100% rename from crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl rename to crates/bevy_anti_aliasing/src/fxaa/fxaa.wgsl diff --git a/crates/bevy_core_pipeline/src/fxaa/mod.rs b/crates/bevy_anti_aliasing/src/fxaa/mod.rs similarity index 98% rename from crates/bevy_core_pipeline/src/fxaa/mod.rs rename to crates/bevy_anti_aliasing/src/fxaa/mod.rs index b8904bb1f9..6d7824cf21 100644 --- a/crates/bevy_core_pipeline/src/fxaa/mod.rs +++ b/crates/bevy_anti_aliasing/src/fxaa/mod.rs @@ -1,10 +1,10 @@ -use crate::{ +use bevy_app::prelude::*; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_core_pipeline::{ core_2d::graph::{Core2d, Node2d}, core_3d::graph::{Core3d, Node3d}, fullscreen_vertex_shader::fullscreen_shader_vertex_state, }; -use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::prelude::*; use bevy_image::BevyDefault as _; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -27,7 +27,7 @@ mod node; pub use node::FxaaNode; #[derive(Debug, Reflect, Eq, PartialEq, Hash, Clone, Copy)] -#[reflect(PartialEq, Hash)] +#[reflect(PartialEq, Hash, Clone)] pub enum Sensitivity { Low, Medium, @@ -51,7 +51,7 @@ impl Sensitivity { /// A component for enabling Fast Approximate Anti-Aliasing (FXAA) /// for a [`bevy_render::camera::Camera`]. #[derive(Reflect, Component, Clone, ExtractComponent)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[extract_component_filter(With)] #[doc(alias = "FastApproximateAntiAliasing")] pub struct Fxaa { diff --git a/crates/bevy_core_pipeline/src/fxaa/node.rs b/crates/bevy_anti_aliasing/src/fxaa/node.rs similarity index 100% rename from crates/bevy_core_pipeline/src/fxaa/node.rs rename to crates/bevy_anti_aliasing/src/fxaa/node.rs diff --git a/crates/bevy_anti_aliasing/src/lib.rs b/crates/bevy_anti_aliasing/src/lib.rs new file mode 100644 index 0000000000..be09a2e5b2 --- /dev/null +++ b/crates/bevy_anti_aliasing/src/lib.rs @@ -0,0 +1,27 @@ +#![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")] +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc( + html_logo_url = "https://bevyengine.org/assets/icon.png", + html_favicon_url = "https://bevyengine.org/assets/icon.png" +)] + +use bevy_app::Plugin; +use contrast_adaptive_sharpening::CasPlugin; +use fxaa::FxaaPlugin; +use smaa::SmaaPlugin; + +pub mod contrast_adaptive_sharpening; +pub mod experimental; +pub mod fxaa; +pub mod smaa; + +mod taa; + +#[derive(Default)] +pub struct AntiAliasingPlugin; +impl Plugin for AntiAliasingPlugin { + fn build(&self, app: &mut bevy_app::App) { + app.add_plugins((FxaaPlugin, CasPlugin, SmaaPlugin)); + } +} diff --git a/crates/bevy_core_pipeline/src/smaa/SMAAAreaLUT.ktx2 b/crates/bevy_anti_aliasing/src/smaa/SMAAAreaLUT.ktx2 similarity index 100% rename from crates/bevy_core_pipeline/src/smaa/SMAAAreaLUT.ktx2 rename to crates/bevy_anti_aliasing/src/smaa/SMAAAreaLUT.ktx2 diff --git a/crates/bevy_core_pipeline/src/smaa/SMAASearchLUT.ktx2 b/crates/bevy_anti_aliasing/src/smaa/SMAASearchLUT.ktx2 similarity index 100% rename from crates/bevy_core_pipeline/src/smaa/SMAASearchLUT.ktx2 rename to crates/bevy_anti_aliasing/src/smaa/SMAASearchLUT.ktx2 diff --git a/crates/bevy_core_pipeline/src/smaa/mod.rs 
b/crates/bevy_anti_aliasing/src/smaa/mod.rs similarity index 99% rename from crates/bevy_core_pipeline/src/smaa/mod.rs rename to crates/bevy_anti_aliasing/src/smaa/mod.rs index fb066a3ce5..f1e4d28678 100644 --- a/crates/bevy_core_pipeline/src/smaa/mod.rs +++ b/crates/bevy_anti_aliasing/src/smaa/mod.rs @@ -29,16 +29,16 @@ //! * Compatibility with SSAA and MSAA. //! //! [SMAA]: https://www.iryoku.com/smaa/ -#[cfg(not(feature = "smaa_luts"))] -use crate::tonemapping::lut_placeholder; -use crate::{ - core_2d::graph::{Core2d, Node2d}, - core_3d::graph::{Core3d, Node3d}, -}; use bevy_app::{App, Plugin}; #[cfg(feature = "smaa_luts")] use bevy_asset::load_internal_binary_asset; use bevy_asset::{load_internal_asset, weak_handle, Handle}; +#[cfg(not(feature = "smaa_luts"))] +use bevy_core_pipeline::tonemapping::lut_placeholder; +use bevy_core_pipeline::{ + core_2d::graph::{Core2d, Node2d}, + core_3d::graph::{Core3d, Node3d}, +}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ component::Component, @@ -46,7 +46,7 @@ use bevy_ecs::{ query::{QueryItem, With}, reflect::ReflectComponent, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -95,7 +95,7 @@ pub struct SmaaPlugin; /// A component for enabling Subpixel Morphological Anti-Aliasing (SMAA) /// for a [`bevy_render::camera::Camera`]. #[derive(Clone, Copy, Default, Component, Reflect, ExtractComponent)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[doc(alias = "SubpixelMorphologicalAntiAliasing")] pub struct Smaa { /// A predefined set of SMAA parameters: i.e. a quality level. @@ -110,7 +110,7 @@ pub struct Smaa { /// /// The default value is *high*. #[derive(Clone, Copy, Reflect, Default, PartialEq, Eq, Hash)] -#[reflect(Default)] +#[reflect(Default, Clone, PartialEq, Hash)] pub enum SmaaPreset { /// Four search steps; no diagonal or corner detection. 
Low, @@ -297,8 +297,6 @@ impl Plugin for SmaaPlugin { SMAA_AREA_LUT_TEXTURE_HANDLE, "SMAAAreaLUT.ktx2", |bytes, _: String| Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - "SMAAAreaLUT".to_owned(), bytes, bevy_image::ImageType::Format(bevy_image::ImageFormat::Ktx2), bevy_image::CompressedImageFormats::NONE, @@ -315,8 +313,6 @@ impl Plugin for SmaaPlugin { SMAA_SEARCH_LUT_TEXTURE_HANDLE, "SMAASearchLUT.ktx2", |bytes, _: String| Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - "SMAASearchLUT".to_owned(), bytes, bevy_image::ImageType::Format(bevy_image::ImageFormat::Ktx2), bevy_image::CompressedImageFormats::NONE, diff --git a/crates/bevy_core_pipeline/src/smaa/smaa.wgsl b/crates/bevy_anti_aliasing/src/smaa/smaa.wgsl similarity index 100% rename from crates/bevy_core_pipeline/src/smaa/smaa.wgsl rename to crates/bevy_anti_aliasing/src/smaa/smaa.wgsl diff --git a/crates/bevy_core_pipeline/src/taa/mod.rs b/crates/bevy_anti_aliasing/src/taa/mod.rs similarity index 99% rename from crates/bevy_core_pipeline/src/taa/mod.rs rename to crates/bevy_anti_aliasing/src/taa/mod.rs index 55eb25ae02..cf5ac269e2 100644 --- a/crates/bevy_core_pipeline/src/taa/mod.rs +++ b/crates/bevy_anti_aliasing/src/taa/mod.rs @@ -1,17 +1,17 @@ -use crate::{ +use bevy_app::{App, Plugin}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_core_pipeline::{ core_3d::graph::{Core3d, Node3d}, fullscreen_vertex_shader::fullscreen_shader_vertex_state, prelude::Camera3d, prepass::{DepthPrepass, MotionVectorPrepass, ViewPrepassTextures}, }; -use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_diagnostic::FrameCount; use bevy_ecs::{ - prelude::{require, Component, Entity, ReflectComponent}, + prelude::{Component, Entity, ReflectComponent}, query::{QueryItem, With}, resource::Resource, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -131,7 +131,7 @@ impl Plugin for TemporalAntiAliasPlugin { /// /// If no [`MipBias`] component is attached to the camera, TAA will add a `MipBias(-1.0)` component. 
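Temporal anti-aliasing stays opt-in and is only re-exported through the `experimental::taa` module added in the new `lib.rs`. A hypothetical sketch of enabling it; the component's required companions are pulled in automatically via the `#[require(...)]` attribute shown just below:

```rust
use bevy::prelude::*;
use bevy_anti_aliasing::experimental::taa::{TemporalAntiAliasPlugin, TemporalAntiAliasing};

fn main() {
    App::new()
        .add_plugins((DefaultPlugins, TemporalAntiAliasPlugin))
        .add_systems(Startup, |mut commands: Commands| {
            // Jitter and the depth/motion-vector prepasses are inserted for us
            // as required components.
            commands.spawn((Camera3d::default(), TemporalAntiAliasing::default()));
        })
        .run();
}
```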
#[derive(Component, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require(TemporalJitter, DepthPrepass, MotionVectorPrepass)] #[doc(alias = "Taa")] pub struct TemporalAntiAliasing { diff --git a/crates/bevy_core_pipeline/src/taa/taa.wgsl b/crates/bevy_anti_aliasing/src/taa/taa.wgsl similarity index 100% rename from crates/bevy_core_pipeline/src/taa/taa.wgsl rename to crates/bevy_anti_aliasing/src/taa/taa.wgsl diff --git a/crates/bevy_app/Cargo.toml b/crates/bevy_app/Cargo.toml index e4fb9748c7..f46db94db3 100644 --- a/crates/bevy_app/Cargo.toml +++ b/crates/bevy_app/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_app" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides core App functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,7 +9,7 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -default = ["std", "bevy_reflect", "bevy_tasks", "bevy_ecs/default"] +default = ["std", "bevy_reflect", "bevy_ecs/default", "error_panic_hook"] # Functionality @@ -23,9 +23,6 @@ reflect_functions = [ "bevy_ecs/reflect_functions", ] -## Adds support for running async background tasks -bevy_tasks = ["dep:bevy_tasks"] - # Debugging Features ## Enables `tracing` integration, allowing spans and other metrics to be reported @@ -36,6 +33,10 @@ trace = ["dep:tracing"] ## other debug operations which can help with diagnosing certain behaviors. bevy_debug_stepping = [] +## Will set the BevyError panic hook, which gives cleaner filtered backtraces when +## a BevyError is hit. +error_panic_hook = [] + # Platform Compatibility ## Allows access to the `std` crate. Enabling this feature will prevent compilation @@ -47,26 +48,28 @@ std = [ "dep:ctrlc", "downcast-rs/std", "bevy_utils/std", - "bevy_tasks?/std", - "bevy_platform_support/std", + "bevy_tasks/std", + "bevy_platform/std", ] ## `critical-section` provides the building blocks for synchronization primitives ## on all platforms, including `no_std`. critical-section = [ - "bevy_tasks?/critical-section", + "bevy_tasks/critical-section", "bevy_ecs/critical-section", - "bevy_platform_support/critical-section", + "bevy_platform/critical-section", "bevy_reflect?/critical-section", ] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "bevy_tasks?/portable-atomic", - "bevy_ecs/portable-atomic", - "bevy_platform_support/portable-atomic", - "bevy_reflect?/portable-atomic", +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. 
+web = [ + "bevy_platform/web", + "bevy_tasks/web", + "bevy_reflect?/web", + "dep:wasm-bindgen", + "dep:web-sys", + "dep:console_error_panic_hook", ] [dependencies] @@ -77,8 +80,8 @@ bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-featu bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } -bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false, optional = true } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } # other downcast-rs = { version = "2", default-features = false } @@ -86,14 +89,15 @@ thiserror = { version = "2", default-features = false } variadics_please = "1.1" tracing = { version = "0.1", default-features = false, optional = true } log = { version = "0.4", default-features = false } +cfg-if = "1.0.0" [target.'cfg(any(unix, windows))'.dependencies] ctrlc = { version = "3.4.4", optional = true } [target.'cfg(target_arch = "wasm32")'.dependencies] -wasm-bindgen = { version = "0.2" } -web-sys = { version = "0.3", features = ["Window"] } -console_error_panic_hook = "0.1.6" +wasm-bindgen = { version = "0.2", optional = true } +web-sys = { version = "0.3", features = ["Window"], optional = true } +console_error_panic_hook = { version = "0.1.6", optional = true } [dev-dependencies] crossbeam-channel = "0.5.0" diff --git a/crates/bevy_app/src/app.rs b/crates/bevy_app/src/app.rs index 081e3e7ac0..654a7098b9 100644 --- a/crates/bevy_app/src/app.rs +++ b/crates/bevy_app/src/app.rs @@ -13,13 +13,12 @@ use bevy_ecs::{ event::{event_update_system, EventCursor}, intern::Interned, prelude::*, - schedule::{ScheduleBuildSettings, ScheduleLabel}, - system::{IntoObserverSystem, SystemId, SystemInput}, + schedule::{InternedSystemSet, ScheduleBuildSettings, ScheduleLabel}, + system::{IntoObserverSystem, ScheduleSystem, SystemId, SystemInput}, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::{fmt::Debug, num::NonZero, panic::AssertUnwindSafe}; use log::debug; -use thiserror::Error; #[cfg(feature = "trace")] use tracing::info_span; @@ -44,7 +43,7 @@ pub use bevy_ecs::label::DynEq; /// A shorthand for `Interned`. pub type InternedAppLabel = Interned; -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] pub(crate) enum AppError { #[error("duplicate plugin {plugin_name:?}")] DuplicatePlugin { plugin_name: String }, @@ -302,7 +301,7 @@ impl App { pub fn add_systems( &mut self, schedule: impl ScheduleLabel, - systems: impl IntoSystemConfigs, + systems: impl IntoScheduleConfigs, ) -> &mut Self { self.main_mut().add_systems(schedule, systems); self @@ -330,10 +329,10 @@ impl App { /// Configures a collection of system sets in the provided schedule, adding any sets that do not exist. 
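`IntoSystemConfigs`/`IntoSystemSetConfigs` are replaced by the unified `IntoScheduleConfigs` trait throughout this diff. A small sketch of what downstream code imports after the rename, assuming only the trait path changed and the ordering API is intact:

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::schedule::{IntoScheduleConfigs, Schedule};

fn movement() {}
fn collisions() {}

fn main() {
    let mut world = World::new();
    let mut schedule = Schedule::default();
    // `.chain()`, `.before()`, and `.after()` now come from `IntoScheduleConfigs`.
    schedule.add_systems((movement, collisions).chain());
    schedule.run(&mut world);
}
```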
#[track_caller] - pub fn configure_sets( + pub fn configure_sets( &mut self, schedule: impl ScheduleLabel, - sets: impl IntoSystemSetConfigs, + sets: impl IntoScheduleConfigs, ) -> &mut Self { self.main_mut().configure_sets(schedule, sets); self @@ -1034,6 +1033,17 @@ impl App { .try_register_required_components_with::(constructor) } + /// Registers a component type as "disabling", + /// using [default query filters](bevy_ecs::entity_disabling::DefaultQueryFilters) to exclude entities with the component from queries. + /// + /// # Warning + /// + /// As discussed in the [module docs](bevy_ecs::entity_disabling), this can have performance implications, + /// as well as create interoperability issues, and should be used with caution. + pub fn register_disabling_component(&mut self) { + self.world_mut().register_disabling_component::(); + } + /// Returns a reference to the main [`SubApp`]'s [`World`]. This is the same as calling /// [`app.main().world()`]. /// @@ -1330,7 +1340,7 @@ type RunnerFn = Box AppExit>; fn run_once(mut app: App) -> AppExit { while app.plugins_state() == PluginsState::Adding { - #[cfg(all(not(target_arch = "wasm32"), feature = "bevy_tasks"))] + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] bevy_tasks::tick_global_task_pools_on_main_thread(); } app.finish(); @@ -1394,7 +1404,6 @@ impl AppExit { } impl From for AppExit { - #[must_use] fn from(value: u8) -> Self { Self::from_code(value) } @@ -1413,7 +1422,7 @@ impl Termination for AppExit { #[cfg(test)] mod tests { - use core::{iter, marker::PhantomData}; + use core::marker::PhantomData; use std::sync::Mutex; use bevy_ecs::{ @@ -1424,7 +1433,7 @@ mod tests { query::With, removal_detection::RemovedComponents, resource::Resource, - schedule::{IntoSystemConfigs, ScheduleLabel}, + schedule::{IntoScheduleConfigs, ScheduleLabel}, system::{Commands, Query}, world::{FromWorld, World}, }; @@ -1637,7 +1646,7 @@ mod tests { struct Foo; let mut app = App::new(); - app.world_mut().spawn_batch(iter::repeat(Foo).take(5)); + app.world_mut().spawn_batch(core::iter::repeat_n(Foo, 5)); fn despawn_one_foo(mut commands: Commands, foos: Query>) { if let Some(e) = foos.iter().next() { @@ -1691,9 +1700,9 @@ mod tests { fn raise_exits(mut exits: EventWriter) { // Exit codes chosen by a fair dice roll. // Unlikely to overlap with default values. 
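The new `App::register_disabling_component` simply forwards to the `World` method of the same name. A minimal hypothetical sketch; the `Hidden` marker component is illustrative:

```rust
use bevy_app::App;
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Hidden;

fn main() {
    let mut app = App::new();
    // Entities carrying `Hidden` are now skipped by ordinary queries via the
    // default query filters, with the caveats noted in the doc comment above.
    app.register_disabling_component::<Hidden>();
}
```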
- exits.send(AppExit::Success); - exits.send(AppExit::from_code(4)); - exits.send(AppExit::from_code(73)); + exits.write(AppExit::Success); + exits.write(AppExit::from_code(4)); + exits.write(AppExit::from_code(73)); } let exit = App::new().add_systems(Update, raise_exits).run(); diff --git a/crates/bevy_app/src/lib.rs b/crates/bevy_app/src/lib.rs index 489b5415e3..6772136414 100644 --- a/crates/bevy_app/src/lib.rs +++ b/crates/bevy_app/src/lib.rs @@ -30,7 +30,6 @@ mod plugin; mod plugin_group; mod schedule_runner; mod sub_app; -#[cfg(feature = "bevy_tasks")] mod task_pool_plugin; #[cfg(all(any(unix, windows), feature = "std"))] mod terminal_ctrl_c_handler; @@ -42,7 +41,6 @@ pub use plugin::*; pub use plugin_group::*; pub use schedule_runner::*; pub use sub_app::*; -#[cfg(feature = "bevy_tasks")] pub use task_pool_plugin::*; #[cfg(all(any(unix, windows), feature = "std"))] pub use terminal_ctrl_c_handler::*; @@ -60,10 +58,6 @@ pub mod prelude { RunFixedMainLoopSystem, SpawnScene, Startup, Update, }, sub_app::SubApp, - Plugin, PluginGroup, + Plugin, PluginGroup, TaskPoolOptions, TaskPoolPlugin, }; - - #[cfg(feature = "bevy_tasks")] - #[doc(hidden)] - pub use crate::{NonSendMarker, TaskPoolOptions, TaskPoolPlugin}; } diff --git a/crates/bevy_app/src/main_schedule.rs b/crates/bevy_app/src/main_schedule.rs index 55f8bbf2be..23e8ca0c33 100644 --- a/crates/bevy_app/src/main_schedule.rs +++ b/crates/bevy_app/src/main_schedule.rs @@ -3,7 +3,7 @@ use alloc::{vec, vec::Vec}; use bevy_ecs::{ resource::Resource, schedule::{ - ExecutorKind, InternedScheduleLabel, IntoSystemSetConfigs, Schedule, ScheduleLabel, + ExecutorKind, InternedScheduleLabel, IntoScheduleConfigs, Schedule, ScheduleLabel, SystemSet, }, system::Local, @@ -15,6 +15,13 @@ use bevy_ecs::{ /// By default, it will run the following schedules in the given order: /// /// On the first run of the schedule (and only on the first run), it will run: +/// * [`StateTransition`] [^1] +/// * This means that [`OnEnter(MyState::Foo)`] will be called *before* [`PreStartup`] +/// if `MyState` was added to the app with `MyState::Foo` as the initial state, +/// as well as [`OnEnter(MyComputedState)`] if it `compute`s to `Some(Self)` in `MyState::Foo`. +/// * If you want to run systems before any state transitions, regardless of which state is the starting state, +/// for example, for registering required components, you can add your own custom startup schedule +/// before [`StateTransition`]. See [`MainScheduleOrder::insert_startup_before`] for more details. /// * [`PreStartup`] /// * [`Startup`] /// * [`PostStartup`] @@ -22,7 +29,7 @@ use bevy_ecs::{ /// Then it will run: /// * [`First`] /// * [`PreUpdate`] -/// * [`StateTransition`] +/// * [`StateTransition`] [^1] /// * [`RunFixedMainLoop`] /// * This will run [`FixedMain`] zero to many times, based on how much time has elapsed. /// * [`Update`] @@ -37,35 +44,39 @@ use bevy_ecs::{ /// /// See [`RenderPlugin`] and [`PipelinedRenderingPlugin`] for more details. /// +/// [^1]: [`StateTransition`] is inserted only if you have `bevy_state` feature enabled. It is enabled in `default` features. 
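`EventWriter::send` becomes `EventWriter::write` in the test above, and the same rename applies to downstream systems. A sketch mirroring the `raise_exits` pattern:

```rust
use bevy_app::{App, AppExit, Update};
use bevy_ecs::event::EventWriter;

// Previously written as `exits.send(AppExit::from_code(1))`.
fn quit_with_error(mut exits: EventWriter<AppExit>) {
    exits.write(AppExit::from_code(1));
}

fn main() {
    // As in the test above, the written event ends the run.
    let exit = App::new().add_systems(Update, quit_with_error).run();
    assert_eq!(exit, AppExit::from_code(1));
}
```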
+/// /// [`StateTransition`]: https://docs.rs/bevy/latest/bevy/prelude/struct.StateTransition.html +/// [`OnEnter(MyState::Foo)`]: https://docs.rs/bevy/latest/bevy/prelude/struct.OnEnter.html +/// [`OnEnter(MyComputedState)`]: https://docs.rs/bevy/latest/bevy/prelude/struct.OnEnter.html /// [`RenderPlugin`]: https://docs.rs/bevy/latest/bevy/render/struct.RenderPlugin.html /// [`PipelinedRenderingPlugin`]: https://docs.rs/bevy/latest/bevy/render/pipelined_rendering/struct.PipelinedRenderingPlugin.html /// [`SubApp`]: crate::SubApp -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Main; /// The schedule that runs before [`Startup`]. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PreStartup; /// The schedule that runs once when the app starts. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Startup; /// The schedule that runs once after [`Startup`]. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PostStartup; /// Runs first in the schedule. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct First; /// The schedule that contains logic that must run before [`Update`]. For example, a system that reads raw keyboard @@ -76,7 +87,7 @@ pub struct First; /// [`PreUpdate`] abstracts out "pre work implementation details". /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PreUpdate; /// Runs the [`FixedMain`] schedule in a loop according until all relevant elapsed time has been "consumed". @@ -88,21 +99,21 @@ pub struct PreUpdate; /// [`RunFixedMainLoop`] will *not* be parallelized between each other. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct RunFixedMainLoop; /// Runs first in the [`FixedMain`] schedule. /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedFirst; /// The schedule that contains logic that must run before [`FixedUpdate`]. /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. 
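Every built-in schedule label in this file now also derives `Default`. A custom label following the same derive set (the name is illustrative):

```rust
use bevy_ecs::schedule::ScheduleLabel;

#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)]
struct MyCustomStartup;

fn main() {
    // Deriving `Default` lets the label be constructed wherever a default
    // instance is expected.
    let _label = MyCustomStartup::default();
}
```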
-#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedPreUpdate; /// The schedule that contains most gameplay logic, which runs at a fixed rate rather than every render frame. @@ -117,7 +128,7 @@ pub struct FixedPreUpdate; /// See the [`Update`] schedule for examples of systems that *should not* use this schedule. /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedUpdate; /// The schedule that runs after the [`FixedUpdate`] schedule, for reacting @@ -125,14 +136,14 @@ pub struct FixedUpdate; /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedPostUpdate; /// The schedule that runs last in [`FixedMain`] /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedLast; /// The schedule that contains systems which only run after a fixed period of time has elapsed. @@ -144,7 +155,7 @@ pub struct FixedLast; /// See [this example](https://github.com/bevyengine/bevy/blob/latest/examples/time/time.rs). /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedMain; /// The schedule that contains any app logic that must run once per render frame. @@ -157,13 +168,13 @@ pub struct FixedMain; /// /// See the [`FixedUpdate`] schedule for examples of systems that *should not* use this schedule. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Update; /// The schedule that contains scene spawning. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct SpawnScene; /// The schedule that contains logic that must run after [`Update`]. For example, synchronizing "local transforms" in a hierarchy @@ -174,13 +185,13 @@ pub struct SpawnScene; /// [`PostUpdate`] abstracts out "implementation details" from users defining systems in [`Update`]. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PostUpdate; /// Runs last in the schedule. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Last; /// Animation system set. This exists in [`PostUpdate`]. 
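As the docs above describe, gameplay logic that should advance at a fixed rate belongs in `FixedUpdate` rather than `Update`. A minimal sketch; actually stepping the fixed loop additionally requires the time plugins, which are not shown here:

```rust
use bevy_app::{App, FixedUpdate};

fn physics_step() {
    // Runs zero or more times per frame, depending on how much time elapsed.
}

fn main() {
    App::new().add_systems(FixedUpdate, physics_step);
}
```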
@@ -316,7 +327,7 @@ impl Plugin for MainSchedulePlugin { #[cfg(feature = "bevy_debug_stepping")] { - use bevy_ecs::schedule::{IntoSystemConfigs, Stepping}; + use bevy_ecs::schedule::{IntoScheduleConfigs, Stepping}; app.add_systems(Main, Stepping::begin_frame.before(Main::run_main)); } } diff --git a/crates/bevy_app/src/panic_handler.rs b/crates/bevy_app/src/panic_handler.rs index 56d66da728..1021a3dc2e 100644 --- a/crates/bevy_app/src/panic_handler.rs +++ b/crates/bevy_app/src/panic_handler.rs @@ -11,7 +11,7 @@ use crate::{App, Plugin}; /// Adds sensible panic handlers to Apps. This plugin is part of the `DefaultPlugins`. Adding /// this plugin will setup a panic hook appropriate to your target platform: /// * On Wasm, uses [`console_error_panic_hook`](https://crates.io/crates/console_error_panic_hook), logging -/// to the browser console. +/// to the browser console. /// * Other platforms are currently not setup. /// /// ```no_run @@ -39,13 +39,23 @@ pub struct PanicHandlerPlugin; impl Plugin for PanicHandlerPlugin { fn build(&self, _app: &mut App) { - #[cfg(target_arch = "wasm32")] + #[cfg(feature = "std")] { - console_error_panic_hook::set_once(); - } - #[cfg(not(target_arch = "wasm32"))] - { - // Use the default target panic hook - Do nothing. + static SET_HOOK: std::sync::Once = std::sync::Once::new(); + SET_HOOK.call_once(|| { + cfg_if::cfg_if! { + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + // This provides better panic handling in JS engines (displays the panic message and improves the backtrace). + std::panic::set_hook(alloc::boxed::Box::new(console_error_panic_hook::hook)); + } else if #[cfg(feature = "error_panic_hook")] { + let current_hook = std::panic::take_hook(); + std::panic::set_hook(alloc::boxed::Box::new( + bevy_ecs::error::bevy_error_panic_hook(current_hook), + )); + } + // Otherwise use the default target panic hook - Do nothing. 
+ } + }); } } } diff --git a/crates/bevy_app/src/plugin_group.rs b/crates/bevy_app/src/plugin_group.rs index dca847f77f..60897d5453 100644 --- a/crates/bevy_app/src/plugin_group.rs +++ b/crates/bevy_app/src/plugin_group.rs @@ -4,7 +4,7 @@ use alloc::{ string::{String, ToString}, vec::Vec, }; -use bevy_platform_support::collections::hash_map::Entry; +use bevy_platform::collections::hash_map::Entry; use bevy_utils::TypeIdMap; use core::any::TypeId; use log::{debug, warn}; diff --git a/crates/bevy_app/src/schedule_runner.rs b/crates/bevy_app/src/schedule_runner.rs index 2a0e1f966a..594f849b2f 100644 --- a/crates/bevy_app/src/schedule_runner.rs +++ b/crates/bevy_app/src/schedule_runner.rs @@ -3,10 +3,10 @@ use crate::{ plugin::Plugin, PluginsState, }; -use bevy_platform_support::time::Instant; +use bevy_platform::time::Instant; use core::time::Duration; -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", feature = "web"))] use { alloc::{boxed::Box, rc::Rc}, core::cell::RefCell, @@ -77,7 +77,7 @@ impl Plugin for ScheduleRunnerPlugin { let plugins_state = app.plugins_state(); if plugins_state != PluginsState::Cleaned { while app.plugins_state() == PluginsState::Adding { - #[cfg(all(not(target_arch = "wasm32"), feature = "bevy_tasks"))] + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] bevy_tasks::tick_global_task_pools_on_main_thread(); } app.finish(); @@ -118,58 +118,55 @@ impl Plugin for ScheduleRunnerPlugin { Ok(None) }; - #[cfg(not(target_arch = "wasm32"))] - { - loop { - match tick(&mut app, wait) { - Ok(Some(_delay)) => { - #[cfg(feature = "std")] - std::thread::sleep(_delay); - } - Ok(None) => continue, - Err(exit) => return exit, + cfg_if::cfg_if! { + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + fn set_timeout(callback: &Closure, dur: Duration) { + web_sys::window() + .unwrap() + .set_timeout_with_callback_and_timeout_and_arguments_0( + callback.as_ref().unchecked_ref(), + dur.as_millis() as i32, + ) + .expect("Should register `setTimeout`."); } - } - } + let asap = Duration::from_millis(1); - #[cfg(target_arch = "wasm32")] - { - fn set_timeout(callback: &Closure, dur: Duration) { - web_sys::window() - .unwrap() - .set_timeout_with_callback_and_timeout_and_arguments_0( - callback.as_ref().unchecked_ref(), - dur.as_millis() as i32, - ) - .expect("Should register `setTimeout`."); - } - let asap = Duration::from_millis(1); + let exit = Rc::new(RefCell::new(AppExit::Success)); + let closure_exit = exit.clone(); - let exit = Rc::new(RefCell::new(AppExit::Success)); - let closure_exit = exit.clone(); + let mut app = Rc::new(app); + let moved_tick_closure = Rc::new(RefCell::new(None)); + let base_tick_closure = moved_tick_closure.clone(); - let mut app = Rc::new(app); - let moved_tick_closure = Rc::new(RefCell::new(None)); - let base_tick_closure = moved_tick_closure.clone(); + let tick_app = move || { + let app = Rc::get_mut(&mut app).unwrap(); + let delay = tick(app, wait); + match delay { + Ok(delay) => set_timeout( + moved_tick_closure.borrow().as_ref().unwrap(), + delay.unwrap_or(asap), + ), + Err(code) => { + closure_exit.replace(code); + } + } + }; + *base_tick_closure.borrow_mut() = + Some(Closure::wrap(Box::new(tick_app) as Box)); + set_timeout(base_tick_closure.borrow().as_ref().unwrap(), asap); - let tick_app = move || { - let app = Rc::get_mut(&mut app).unwrap(); - let delay = tick(app, wait); - match delay { - Ok(delay) => set_timeout( - moved_tick_closure.borrow().as_ref().unwrap(), - delay.unwrap_or(asap), - ), - Err(code) => { - 
closure_exit.replace(code); + exit.take() + } else { + loop { + match tick(&mut app, wait) { + Ok(Some(delay)) => { + bevy_platform::thread::sleep(delay); + } + Ok(None) => continue, + Err(exit) => return exit, } } - }; - *base_tick_closure.borrow_mut() = - Some(Closure::wrap(Box::new(tick_app) as Box)); - set_timeout(base_tick_closure.borrow().as_ref().unwrap(), asap); - - exit.take() + } } } } diff --git a/crates/bevy_app/src/sub_app.rs b/crates/bevy_app/src/sub_app.rs index 2e3f48f2ca..c340b80654 100644 --- a/crates/bevy_app/src/sub_app.rs +++ b/crates/bevy_app/src/sub_app.rs @@ -3,10 +3,10 @@ use alloc::{boxed::Box, string::String, vec::Vec}; use bevy_ecs::{ event::EventRegistry, prelude::*, - schedule::{InternedScheduleLabel, ScheduleBuildSettings, ScheduleLabel}, - system::{SystemId, SystemInput}, + schedule::{InternedScheduleLabel, InternedSystemSet, ScheduleBuildSettings, ScheduleLabel}, + system::{ScheduleSystem, SystemId, SystemInput}, }; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use core::fmt::Debug; #[cfg(feature = "trace")] @@ -211,7 +211,7 @@ impl SubApp { pub fn add_systems( &mut self, schedule: impl ScheduleLabel, - systems: impl IntoSystemConfigs, + systems: impl IntoScheduleConfigs, ) -> &mut Self { let mut schedules = self.world.resource_mut::(); schedules.add_systems(schedule, systems); @@ -233,10 +233,10 @@ impl SubApp { /// See [`App::configure_sets`]. #[track_caller] - pub fn configure_sets( + pub fn configure_sets( &mut self, schedule: impl ScheduleLabel, - sets: impl IntoSystemSetConfigs, + sets: impl IntoScheduleConfigs, ) -> &mut Self { let mut schedules = self.world.resource_mut::(); schedules.configure_sets(schedule, sets); diff --git a/crates/bevy_app/src/task_pool_plugin.rs b/crates/bevy_app/src/task_pool_plugin.rs index d2146d9a65..5ed4e3fa5d 100644 --- a/crates/bevy_app/src/task_pool_plugin.rs +++ b/crates/bevy_app/src/task_pool_plugin.rs @@ -1,24 +1,25 @@ -#![cfg_attr( - feature = "portable-atomic", - expect( - clippy::redundant_closure, - reason = "bevy_platform_support::sync::Arc has subtly different implicit behavior" - ) -)] - use crate::{App, Plugin}; use alloc::string::ToString; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use bevy_tasks::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool, TaskPoolBuilder}; -use core::{fmt::Debug, marker::PhantomData}; +use core::fmt::Debug; use log::trace; -#[cfg(not(target_arch = "wasm32"))] -use {crate::Last, bevy_ecs::prelude::NonSend}; +cfg_if::cfg_if! { + if #[cfg(not(all(target_arch = "wasm32", feature = "web")))] { + use {crate::Last, bevy_tasks::tick_global_task_pools_on_main_thread}; + use bevy_ecs::system::NonSendMarker; -#[cfg(not(target_arch = "wasm32"))] -use bevy_tasks::tick_global_task_pools_on_main_thread; + /// A system used to check and advanced our task pools. + /// + /// Calls [`tick_global_task_pools_on_main_thread`], + /// and uses [`NonSendMarker`] to ensure that this system runs on the main thread + fn tick_global_task_pools(_main_thread_marker: NonSendMarker) { + tick_global_task_pools_on_main_thread(); + } + } +} /// Setup of default task pools: [`AsyncComputeTaskPool`], [`ComputeTaskPool`], [`IoTaskPool`]. 
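For reference, the loop rewritten above backs the headless `ScheduleRunnerPlugin`. A hypothetical headless app driving it, assuming the existing `run_loop` constructor is unchanged:

```rust
use bevy_app::{App, ScheduleRunnerPlugin, Update};
use core::time::Duration;

fn tick() {
    println!("tick");
}

fn main() {
    App::new()
        // Wait roughly 16 ms between frames instead of spinning as fast as possible.
        .add_plugins(ScheduleRunnerPlugin::run_loop(Duration::from_millis(16)))
        .add_systems(Update, tick)
        .run();
}
```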
#[derive(Default)] @@ -32,21 +33,10 @@ impl Plugin for TaskPoolPlugin { // Setup the default bevy task pools self.task_pool_options.create_default_pools(); - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] _app.add_systems(Last, tick_global_task_pools); } } -/// A dummy type that is [`!Send`](Send), to force systems to run on the main thread. -pub struct NonSendMarker(PhantomData<*mut ()>); - -/// A system used to check and advanced our task pools. -/// -/// Calls [`tick_global_task_pools_on_main_thread`], -/// and uses [`NonSendMarker`] to ensure that this system runs on the main thread -#[cfg(not(target_arch = "wasm32"))] -fn tick_global_task_pools(_main_thread_marker: Option>) { - tick_global_task_pools_on_main_thread(); -} /// Defines a simple way to determine how many threads to use given the number of remaining cores /// and number of total cores @@ -184,20 +174,21 @@ impl TaskPoolOptions { remaining_threads = remaining_threads.saturating_sub(io_threads); IoTaskPool::get_or_init(|| { - #[cfg_attr(target_arch = "wasm32", expect(unused_mut))] - let mut builder = TaskPoolBuilder::default() + let builder = TaskPoolBuilder::default() .num_threads(io_threads) .thread_name("IO Task Pool".to_string()); - #[cfg(not(target_arch = "wasm32"))] - { + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] + let builder = { + let mut builder = builder; if let Some(f) = self.io.on_thread_spawn.clone() { builder = builder.on_thread_spawn(move || f()); } if let Some(f) = self.io.on_thread_destroy.clone() { builder = builder.on_thread_destroy(move || f()); } - } + builder + }; builder.build() }); @@ -213,20 +204,21 @@ impl TaskPoolOptions { remaining_threads = remaining_threads.saturating_sub(async_compute_threads); AsyncComputeTaskPool::get_or_init(|| { - #[cfg_attr(target_arch = "wasm32", expect(unused_mut))] - let mut builder = TaskPoolBuilder::default() + let builder = TaskPoolBuilder::default() .num_threads(async_compute_threads) .thread_name("Async Compute Task Pool".to_string()); - #[cfg(not(target_arch = "wasm32"))] - { + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] + let builder = { + let mut builder = builder; if let Some(f) = self.async_compute.on_thread_spawn.clone() { builder = builder.on_thread_spawn(move || f()); } if let Some(f) = self.async_compute.on_thread_destroy.clone() { builder = builder.on_thread_destroy(move || f()); } - } + builder + }; builder.build() }); @@ -242,20 +234,21 @@ impl TaskPoolOptions { trace!("Compute Threads: {}", compute_threads); ComputeTaskPool::get_or_init(|| { - #[cfg_attr(target_arch = "wasm32", expect(unused_mut))] - let mut builder = TaskPoolBuilder::default() + let builder = TaskPoolBuilder::default() .num_threads(compute_threads) .thread_name("Compute Task Pool".to_string()); - #[cfg(not(target_arch = "wasm32"))] - { + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] + let builder = { + let mut builder = builder; if let Some(f) = self.compute.on_thread_spawn.clone() { builder = builder.on_thread_spawn(move || f()); } if let Some(f) = self.compute.on_thread_destroy.clone() { builder = builder.on_thread_destroy(move || f()); } - } + builder + }; builder.build() }); diff --git a/crates/bevy_app/src/terminal_ctrl_c_handler.rs b/crates/bevy_app/src/terminal_ctrl_c_handler.rs index 0eb34ccdbe..48af3c09f1 100644 --- a/crates/bevy_app/src/terminal_ctrl_c_handler.rs +++ b/crates/bevy_app/src/terminal_ctrl_c_handler.rs @@ -50,7 +50,7 @@ impl TerminalCtrlCHandlerPlugin { /// Sends a 
[`AppExit`] event when the user presses `Ctrl+C` on the terminal. pub fn exit_on_flag(mut events: EventWriter) { if SHOULD_EXIT.load(Ordering::Relaxed) { - events.send(AppExit::from_code(130)); + events.write(AppExit::from_code(130)); } } } diff --git a/crates/bevy_asset/Cargo.toml b/crates/bevy_asset/Cargo.toml index 699029f52c..a3c16a47b1 100644 --- a/crates/bevy_asset/Cargo.toml +++ b/crates/bevy_asset/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_asset" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides asset functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -29,7 +29,7 @@ bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ ] } bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", ] } @@ -58,6 +58,7 @@ tracing = { version = "0.1", default-features = false, features = ["std"] } bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. wasm-bindgen = { version = "0.2" } web-sys = { version = "0.3", features = [ "Window", @@ -67,6 +68,15 @@ web-sys = { version = "0.3", features = [ wasm-bindgen-futures = "0.4" js-sys = "0.3" uuid = { version = "1.13.1", default-features = false, features = ["js"] } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] notify-debouncer-full = { version = "0.5.0", optional = true } @@ -77,9 +87,6 @@ ureq = { version = "3", optional = true, default-features = false, features = [ ] } -[dev-dependencies] -bevy_log = { path = "../bevy_log", version = "0.16.0-dev" } - [lints] workspace = true diff --git a/crates/bevy_asset/macros/Cargo.toml b/crates/bevy_asset/macros/Cargo.toml index 9b6c4f56a1..43562ae806 100644 --- a/crates/bevy_asset/macros/Cargo.toml +++ b/crates/bevy_asset/macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_asset_macros" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Derive implementations for bevy_asset" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_asset/src/asset_changed.rs b/crates/bevy_asset/src/asset_changed.rs index f11283f488..40723d7b08 100644 --- a/crates/bevy_asset/src/asset_changed.rs +++ b/crates/bevy_asset/src/asset_changed.rs @@ -13,7 +13,7 @@ use bevy_ecs::{ storage::{Table, TableRow}, world::unsafe_world_cell::UnsafeWorldCell, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::marker::PhantomData; use disqualified::ShortName; use tracing::error; @@ -281,6 +281,7 @@ unsafe impl QueryFilter for AssetChanged { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use crate::{AssetEvents, 
AssetPlugin, Handle}; use alloc::{vec, vec::Vec}; @@ -289,7 +290,7 @@ mod tests { use crate::{AssetApp, Assets}; use bevy_app::{App, AppExit, PostUpdate, Startup, TaskPoolPlugin, Update}; - use bevy_ecs::schedule::IntoSystemConfigs; + use bevy_ecs::schedule::IntoScheduleConfigs; use bevy_ecs::{ component::Component, event::EventWriter, @@ -330,7 +331,7 @@ mod tests { _query: Query<&mut MyComponent, AssetChanged>, mut exit: EventWriter, ) { - exit.send(AppExit::Error(NonZero::::MIN)); + exit.write(AppExit::Error(NonZero::::MIN)); } run_app(compatible_filter); } diff --git a/crates/bevy_asset/src/assets.rs b/crates/bevy_asset/src/assets.rs index f16feebdc1..9fa8eb4381 100644 --- a/crates/bevy_asset/src/assets.rs +++ b/crates/bevy_asset/src/assets.rs @@ -6,7 +6,7 @@ use bevy_ecs::{ resource::Resource, system::{Res, ResMut, SystemChangeTick}, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::{Reflect, TypePath}; use core::{any::TypeId, iter::Enumerate, marker::PhantomData, sync::atomic::AtomicU32}; use crossbeam_channel::{Receiver, Sender}; @@ -95,6 +95,7 @@ impl AssetIndexAllocator { /// [`AssetPath`]: crate::AssetPath #[derive(Asset, TypePath)] pub struct LoadedUntypedAsset { + /// The handle to the loaded asset. #[dependency] pub handle: UntypedHandle, } @@ -280,6 +281,8 @@ impl DenseAssetStorage { /// at compile time. /// /// This tracks (and queues) [`AssetEvent`] events whenever changes to the collection occur. +/// To check whether the asset used by a given component has changed (due to a change in the handle or the underlying asset) +/// use the [`AssetChanged`](crate::asset_changed::AssetChanged) query filter. #[derive(Resource)] pub struct Assets { dense_storage: DenseAssetStorage, @@ -459,16 +462,22 @@ impl Assets { /// Removes the [`Asset`] with the given `id`. pub(crate) fn remove_dropped(&mut self, id: AssetId) { match self.duplicate_handles.get_mut(&id) { - None | Some(0) => {} + None => {} + Some(0) => { + self.duplicate_handles.remove(&id); + } Some(value) => { *value -= 1; return; } } + let existed = match id { AssetId::Index { index, .. } => self.dense_storage.remove_dropped(index).is_some(), AssetId::Uuid { uuid } => self.hash_map.remove(&uuid).is_some(), }; + + self.queued_events.push(AssetEvent::Unused { id }); if existed { self.queued_events.push(AssetEvent::Removed { id }); } @@ -550,7 +559,6 @@ impl Assets { } } - assets.queued_events.push(AssetEvent::Unused { id }); assets.remove_dropped(id); } } @@ -576,7 +584,7 @@ impl Assets { }; } } - events.send_batch(assets.queued_events.drain(..)); + events.write_batch(assets.queued_events.drain(..)); } /// A run condition for [`asset_events`]. The system will not run if there are no events to @@ -592,7 +600,7 @@ impl Assets { pub struct AssetsMutIterator<'a, A: Asset> { queued_events: &'a mut Vec>, dense_storage: Enumerate>>, - hash_map: bevy_platform_support::collections::hash_map::IterMut<'a, Uuid, A>, + hash_map: bevy_platform::collections::hash_map::IterMut<'a, Uuid, A>, } impl<'a, A: Asset> Iterator for AssetsMutIterator<'a, A> { @@ -629,6 +637,7 @@ impl<'a, A: Asset> Iterator for AssetsMutIterator<'a, A> { } } +/// An error returned when an [`AssetIndex`] has an invalid generation. #[derive(Error, Debug)] #[error("AssetIndex {index:?} has an invalid generation. 
The current generation is: '{current_generation}'.")] pub struct InvalidGenerationError { diff --git a/crates/bevy_asset/src/direct_access_ext.rs b/crates/bevy_asset/src/direct_access_ext.rs index bfa7fa17b2..792d523a30 100644 --- a/crates/bevy_asset/src/direct_access_ext.rs +++ b/crates/bevy_asset/src/direct_access_ext.rs @@ -5,6 +5,7 @@ use bevy_ecs::world::World; use crate::{meta::Settings, Asset, AssetPath, AssetServer, Assets, Handle}; +/// An extension trait for methods for working with assets directly from a [`World`]. pub trait DirectAssetAccessExt { /// Insert an asset similarly to [`Assets::add`]. fn add_asset(&mut self, asset: impl Into) -> Handle; diff --git a/crates/bevy_asset/src/event.rs b/crates/bevy_asset/src/event.rs index 832cc212d4..087cb44b5a 100644 --- a/crates/bevy_asset/src/event.rs +++ b/crates/bevy_asset/src/event.rs @@ -8,6 +8,7 @@ use core::fmt::Debug; /// For an untyped equivalent, see [`UntypedAssetLoadFailedEvent`]. #[derive(Event, Clone, Debug)] pub struct AssetLoadFailedEvent { + /// The stable identifier of the asset that failed to load. pub id: AssetId, /// The asset path that was attempted. pub path: AssetPath<'static>, @@ -25,6 +26,7 @@ impl AssetLoadFailedEvent { /// An untyped version of [`AssetLoadFailedEvent`]. #[derive(Event, Clone, Debug)] pub struct UntypedAssetLoadFailedEvent { + /// The stable identifier of the asset that failed to load. pub id: UntypedAssetId, /// The asset path that was attempted. pub path: AssetPath<'static>, @@ -43,6 +45,7 @@ impl From<&AssetLoadFailedEvent> for UntypedAssetLoadFailedEvent { } /// Events that occur for a specific loaded [`Asset`], such as "value changed" events and "dependency" events. +#[expect(missing_docs, reason = "Documenting the id fields is unhelpful.")] #[derive(Event, Reflect)] pub enum AssetEvent { /// Emitted whenever an [`Asset`] is added. diff --git a/crates/bevy_asset/src/folder.rs b/crates/bevy_asset/src/folder.rs index 698b19d0c2..c591c88688 100644 --- a/crates/bevy_asset/src/folder.rs +++ b/crates/bevy_asset/src/folder.rs @@ -5,9 +5,12 @@ use bevy_reflect::TypePath; /// A "loaded folder" containing handles for all assets stored in a given [`AssetPath`]. /// +/// This is produced by [`AssetServer::load_folder`](crate::prelude::AssetServer::load_folder). +/// /// [`AssetPath`]: crate::AssetPath #[derive(Asset, TypePath)] pub struct LoadedFolder { + /// The handles of all assets stored in the folder. #[dependency] pub handles: Vec, } diff --git a/crates/bevy_asset/src/handle.rs b/crates/bevy_asset/src/handle.rs index 661e2fb5d9..e6ad1d074a 100644 --- a/crates/bevy_asset/src/handle.rs +++ b/crates/bevy_asset/src/handle.rs @@ -113,16 +113,23 @@ impl core::fmt::Debug for StrongHandle { } } -/// A strong or weak handle to a specific [`Asset`]. If a [`Handle`] is [`Handle::Strong`], the [`Asset`] will be kept +/// A handle to a specific [`Asset`] of type `A`. Handles act as abstract "references" to +/// assets, whose data are stored in the [`Assets`](crate::prelude::Assets) resource, +/// avoiding the need to store multiple copies of the same data. +/// +/// If a [`Handle`] is [`Handle::Strong`], the [`Asset`] will be kept /// alive until the [`Handle`] is dropped. If a [`Handle`] is [`Handle::Weak`], it does not necessarily reference a live [`Asset`], /// nor will it keep assets alive. 
/// +/// Modifying a *handle* will change which existing asset is referenced, but modifying the *asset* +/// (by mutating the [`Assets`](crate::prelude::Assets) resource) will change the asset for all handles referencing it. +/// /// [`Handle`] can be cloned. If a [`Handle::Strong`] is cloned, the referenced [`Asset`] will not be freed until _all_ instances /// of the [`Handle`] are dropped. /// -/// [`Handle::Strong`] also provides access to useful [`Asset`] metadata, such as the [`AssetPath`] (if it exists). +/// [`Handle::Strong`], via [`StrongHandle`] also provides access to useful [`Asset`] metadata, such as the [`AssetPath`] (if it exists). #[derive(Reflect)] -#[reflect(Default, Debug, Hash, PartialEq)] +#[reflect(Default, Debug, Hash, PartialEq, Clone)] pub enum Handle { /// A "strong" reference to a live (or loading) [`Asset`]. If a [`Handle`] is [`Handle::Strong`], the [`Asset`] will be kept /// alive until the [`Handle`] is dropped. Strong handles also provide access to additional asset metadata. @@ -143,7 +150,10 @@ impl Clone for Handle { impl Handle { /// Create a new [`Handle::Weak`] with the given [`u128`] encoding of a [`Uuid`]. - #[deprecated = "use the `weak_handle!` macro with a UUID string instead"] + #[deprecated( + since = "0.16.0", + note = "use the `weak_handle!` macro with a UUID string instead" + )] pub const fn weak_from_u128(value: u128) -> Self { Handle::Weak(AssetId::Uuid { uuid: Uuid::from_u128(value), @@ -284,7 +294,9 @@ impl From<&mut Handle> for UntypedAssetId { /// See [`Handle`] for more information. #[derive(Clone)] pub enum UntypedHandle { + /// A strong handle, which will keep the referenced [`Asset`] alive until all strong handles are dropped. Strong(Arc), + /// A weak handle, which does not keep the referenced [`Asset`] alive. Weak(UntypedAssetId), } @@ -528,13 +540,18 @@ pub enum UntypedAssetConversionError { #[error( "This UntypedHandle is for {found:?} and cannot be converted into a Handle<{expected:?}>" )] - TypeIdMismatch { expected: TypeId, found: TypeId }, + TypeIdMismatch { + /// The expected [`TypeId`] of the [`Handle`] being converted to. + expected: TypeId, + /// The [`TypeId`] of the [`UntypedHandle`] being converted from. 
+ found: TypeId, + }, } #[cfg(test)] mod tests { use alloc::boxed::Box; - use bevy_platform_support::hash::FixedHasher; + use bevy_platform::hash::FixedHasher; use bevy_reflect::PartialReflect; use core::hash::BuildHasher; @@ -644,7 +661,7 @@ mod tests { assert_eq!(UntypedHandle::from(typed.clone()), untyped); } - /// `Reflect::clone_value` should increase the strong count of a strong handle + /// `PartialReflect::reflect_clone`/`PartialReflect::to_dynamic` should increase the strong count of a strong handle #[test] fn strong_handle_reflect_clone() { use crate::{AssetApp, AssetPlugin, Assets, VisitAssetDependencies}; @@ -675,7 +692,7 @@ mod tests { ); let reflected: &dyn Reflect = &handle; - let cloned_handle: Box = reflected.clone_value(); + let _cloned_handle: Box = reflected.reflect_clone().unwrap(); assert_eq!( Arc::strong_count(strong), @@ -683,10 +700,18 @@ mod tests { "Cloning the handle with reflect should increase the strong count to 2" ); - let from_reflect_handle: Handle = - FromReflect::from_reflect(&*cloned_handle).unwrap(); + let dynamic_handle: Box = reflected.to_dynamic(); - assert_eq!(Arc::strong_count(strong), 3, "Converting the reflected value back to a handle should increase the strong count to 3"); + assert_eq!( + Arc::strong_count(strong), + 3, + "Converting the handle to a dynamic should increase the strong count to 3" + ); + + let from_reflect_handle: Handle = + FromReflect::from_reflect(&*dynamic_handle).unwrap(); + + assert_eq!(Arc::strong_count(strong), 4, "Converting the reflected value back to a handle should increase the strong count to 4"); assert!( from_reflect_handle.is_strong(), "The cloned handle should still be strong" diff --git a/crates/bevy_asset/src/id.rs b/crates/bevy_asset/src/id.rs index a1fe13615e..f9aa0d1b96 100644 --- a/crates/bevy_asset/src/id.rs +++ b/crates/bevy_asset/src/id.rs @@ -1,5 +1,5 @@ use crate::{Asset, AssetIndex}; -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -19,6 +19,7 @@ use thiserror::Error; /// /// For an "untyped" / "generic-less" id, see [`UntypedAssetId`]. #[derive(Reflect, Serialize, Deserialize, From)] +#[reflect(Clone, Default, Debug, PartialEq, Hash)] pub enum AssetId { /// A small / efficient runtime identifier that can be used to efficiently look up an asset stored in [`Assets`]. This is /// the "default" identifier used for assets. The alternative(s) (ex: [`AssetId::Uuid`]) will only be used if assets are @@ -26,15 +27,20 @@ pub enum AssetId { /// /// [`Assets`]: crate::Assets Index { + /// The unstable, opaque index of the asset. index: AssetIndex, - #[reflect(ignore)] + /// A marker to store the type information of the asset. + #[reflect(ignore, clone)] marker: PhantomData A>, }, /// A stable-across-runs / const asset identifier. This will only be used if an asset is explicitly registered in [`Assets`] /// with one. /// /// [`Assets`]: crate::Assets - Uuid { uuid: Uuid }, + Uuid { + /// The UUID provided during asset registration. + uuid: Uuid, + }, } impl AssetId { @@ -165,12 +171,22 @@ pub enum UntypedAssetId { /// explicitly registered that way. /// /// [`Assets`]: crate::Assets - Index { type_id: TypeId, index: AssetIndex }, + Index { + /// An identifier that records the underlying asset type. + type_id: TypeId, + /// The unstable, opaque index of the asset. + index: AssetIndex, + }, /// A stable-across-runs / const asset identifier. 
This will only be used if an asset is explicitly registered in [`Assets`] /// with one. /// /// [`Assets`]: crate::Assets - Uuid { type_id: TypeId, uuid: Uuid }, + Uuid { + /// An identifier that records the underlying asset type. + type_id: TypeId, + /// The UUID provided during asset registration. + uuid: Uuid, + }, } impl UntypedAssetId { @@ -404,7 +420,12 @@ impl TryFrom for AssetId { pub enum UntypedAssetIdConversionError { /// Caused when trying to convert an [`UntypedAssetId`] into an [`AssetId`] of the wrong type. #[error("This UntypedAssetId is for {found:?} and cannot be converted into an AssetId<{expected:?}>")] - TypeIdMismatch { expected: TypeId, found: TypeId }, + TypeIdMismatch { + /// The [`TypeId`] of the asset that we are trying to convert to. + expected: TypeId, + /// The [`TypeId`] of the asset that we are trying to convert from. + found: TypeId, + }, } #[cfg(test)] @@ -420,7 +441,7 @@ mod tests { fn hash(data: &T) -> u64 { use core::hash::BuildHasher; - bevy_platform_support::hash::FixedHasher.hash_one(data) + bevy_platform::hash::FixedHasher.hash_one(data) } /// Typed and Untyped `AssetIds` should be equivalent to each other and themselves diff --git a/crates/bevy_asset/src/io/embedded/embedded_watcher.rs b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs index 25dd55ff48..f7fb56be74 100644 --- a/crates/bevy_asset/src/io/embedded/embedded_watcher.rs +++ b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs @@ -4,7 +4,7 @@ use crate::io::{ AssetSourceEvent, AssetWatcher, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::time::Duration; use notify_debouncer_full::{notify::RecommendedWatcher, Debouncer, RecommendedCache}; use parking_lot::RwLock; @@ -24,6 +24,7 @@ pub struct EmbeddedWatcher { } impl EmbeddedWatcher { + /// Creates a new `EmbeddedWatcher` that watches for changes to the embedded assets in the given `dir`. pub fn new( dir: Dir, root_paths: Arc, PathBuf>>>, diff --git a/crates/bevy_asset/src/io/embedded/mod.rs b/crates/bevy_asset/src/io/embedded/mod.rs index cba775aaa6..13610531e2 100644 --- a/crates/bevy_asset/src/io/embedded/mod.rs +++ b/crates/bevy_asset/src/io/embedded/mod.rs @@ -15,6 +15,8 @@ use std::path::{Path, PathBuf}; #[cfg(feature = "embedded_watcher")] use alloc::borrow::ToOwned; +/// The name of the `embedded` [`AssetSource`], +/// as stored in the [`AssetSourceBuilders`] resource. pub const EMBEDDED: &str = "embedded"; /// A [`Resource`] that manages "rust source files" in a virtual in memory [`Dir`], which is intended @@ -27,7 +29,7 @@ pub struct EmbeddedAssetRegistry { dir: Dir, #[cfg(feature = "embedded_watcher")] root_paths: alloc::sync::Arc< - parking_lot::RwLock, PathBuf>>, + parking_lot::RwLock, PathBuf>>, >, } @@ -77,6 +79,7 @@ impl EmbeddedAssetRegistry { self.dir.remove_asset(full_path) } + /// Registers the [`EMBEDDED`] [`AssetSource`] with the given [`AssetSourceBuilders`]. 
pub fn register_source(&self, sources: &mut AssetSourceBuilders) { let dir = self.dir.clone(); let processed_dir = self.dir.clone(); diff --git a/crates/bevy_asset/src/io/file/file_watcher.rs b/crates/bevy_asset/src/io/file/file_watcher.rs index c8700dcdd5..e70cf1665f 100644 --- a/crates/bevy_asset/src/io/file/file_watcher.rs +++ b/crates/bevy_asset/src/io/file/file_watcher.rs @@ -18,7 +18,9 @@ use std::path::{Path, PathBuf}; use tracing::error; /// An [`AssetWatcher`] that watches the filesystem for changes to asset files in a given root folder and emits [`AssetSourceEvent`] -/// for each relevant change. This uses [`notify_debouncer_full`] to retrieve "debounced" filesystem events. +/// for each relevant change. +/// +/// This uses [`notify_debouncer_full`] to retrieve "debounced" filesystem events. /// "Debouncing" defines a time window to hold on to events and then removes duplicate events that fall into this window. /// This introduces a small delay in processing events, but it helps reduce event duplicates. A small delay is also necessary /// on some systems to avoid processing a change event before it has actually been applied. @@ -27,12 +29,13 @@ pub struct FileWatcher { } impl FileWatcher { + /// Creates a new [`FileWatcher`] that watches for changes to the asset files in the given `path`. pub fn new( path: PathBuf, sender: Sender, debounce_wait_time: Duration, ) -> Result { - let root = normalize_path(&path); + let root = normalize_path(&path).canonicalize()?; let watcher = new_asset_event_debouncer( path.clone(), debounce_wait_time, @@ -259,7 +262,8 @@ impl FilesystemEventHandler for FileEventHandler { self.last_event = None; } fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)> { - Some(get_asset_path(&self.root, absolute_path)) + let absolute_path = absolute_path.canonicalize().ok()?; + Some(get_asset_path(&self.root, &absolute_path)) } fn handle(&mut self, _absolute_paths: &[PathBuf], event: AssetSourceEvent) { diff --git a/crates/bevy_asset/src/io/file/mod.rs b/crates/bevy_asset/src/io/file/mod.rs index 719b424e6d..96c43072e8 100644 --- a/crates/bevy_asset/src/io/file/mod.rs +++ b/crates/bevy_asset/src/io/file/mod.rs @@ -65,15 +65,14 @@ impl FileAssetReader { } } +/// A writer for the local filesystem. pub struct FileAssetWriter { root_path: PathBuf, } impl FileAssetWriter { - /// Creates a new `FileAssetIo` at a path relative to the executable's directory, optionally + /// Creates a new [`FileAssetWriter`] at a path relative to the executable's directory, optionally /// watching for changes. - /// - /// See `get_base_path` below. 
pub fn new + core::fmt::Debug>(path: P, create_root: bool) -> Self { let root_path = get_base_path().join(path.as_ref()); if create_root { diff --git a/crates/bevy_asset/src/io/gated.rs b/crates/bevy_asset/src/io/gated.rs index ab3e0998e2..fa4f0f0d3f 100644 --- a/crates/bevy_asset/src/io/gated.rs +++ b/crates/bevy_asset/src/io/gated.rs @@ -1,6 +1,6 @@ use crate::io::{AssetReader, AssetReaderError, PathStream, Reader}; use alloc::{boxed::Box, sync::Arc}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use crossbeam_channel::{Receiver, Sender}; use parking_lot::RwLock; use std::path::Path; diff --git a/crates/bevy_asset/src/io/memory.rs b/crates/bevy_asset/src/io/memory.rs index 5b9e5389c2..4c56057ff9 100644 --- a/crates/bevy_asset/src/io/memory.rs +++ b/crates/bevy_asset/src/io/memory.rs @@ -1,6 +1,6 @@ use crate::io::{AssetReader, AssetReaderError, PathStream, Reader}; use alloc::{borrow::ToOwned, boxed::Box, sync::Arc, vec::Vec}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::{pin::Pin, task::Poll}; use futures_io::AsyncRead; use futures_lite::{ready, Stream}; @@ -60,8 +60,7 @@ impl Dir { dir = self.get_or_insert_dir(parent); } let key: Box = path.file_name().unwrap().to_string_lossy().into(); - let data = dir.0.write().assets.remove(&key); - data + dir.0.write().assets.remove(&key) } pub fn insert_meta(&self, path: &Path, value: impl Into) { diff --git a/crates/bevy_asset/src/io/source.rs b/crates/bevy_asset/src/io/source.rs index 37bdc306b7..4852a2a71f 100644 --- a/crates/bevy_asset/src/io/source.rs +++ b/crates/bevy_asset/src/io/source.rs @@ -9,7 +9,7 @@ use alloc::{ }; use atomicow::CowArc; use bevy_ecs::resource::Resource; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::{fmt::Display, hash::Hash, time::Duration}; use thiserror::Error; use tracing::{error, warn}; @@ -132,8 +132,11 @@ impl<'a> PartialEq for AssetSourceId<'a> { /// and whether or not the source is processed. #[derive(Default)] pub struct AssetSourceBuilder { + /// The [`ErasedAssetReader`] to use on the unprocessed asset. pub reader: Option Box + Send + Sync>>, + /// The [`ErasedAssetWriter`] to use on the unprocessed asset. pub writer: Option Option> + Send + Sync>>, + /// The [`AssetWatcher`] to use for unprocessed assets, if any. pub watcher: Option< Box< dyn FnMut(crossbeam_channel::Sender) -> Option> @@ -141,9 +144,12 @@ pub struct AssetSourceBuilder { + Sync, >, >, + /// The [`ErasedAssetReader`] to use for processed assets. pub processed_reader: Option Box + Send + Sync>>, + /// The [`ErasedAssetWriter`] to use for processed assets. pub processed_writer: Option Option> + Send + Sync>>, + /// The [`AssetWatcher`] to use for processed assets, if any. pub processed_watcher: Option< Box< dyn FnMut(crossbeam_channel::Sender) -> Option> @@ -151,7 +157,9 @@ pub struct AssetSourceBuilder { + Sync, >, >, + /// The warning message to display when watching an unprocessed asset fails. pub watch_warning: Option<&'static str>, + /// The warning message to display when watching a processed asset fails. 
pub processed_watch_warning: Option<&'static str>, } diff --git a/crates/bevy_asset/src/io/wasm.rs b/crates/bevy_asset/src/io/wasm.rs index 0a078a4b03..586e095409 100644 --- a/crates/bevy_asset/src/io/wasm.rs +++ b/crates/bevy_asset/src/io/wasm.rs @@ -53,7 +53,7 @@ fn js_value_to_err(context: &str) -> impl FnOnce(JsValue) -> std::io::Error + '_ impl HttpWasmAssetReader { // Also used by HttpSourceAssetReader - pub(crate) async fn fetch_bytes<'a>( + pub(crate) async fn fetch_bytes( &self, path: PathBuf, ) -> Result { diff --git a/crates/bevy_asset/src/lib.rs b/crates/bevy_asset/src/lib.rs index 032bdb1e0c..92dfc0af22 100644 --- a/crates/bevy_asset/src/lib.rs +++ b/crates/bevy_asset/src/lib.rs @@ -223,10 +223,10 @@ use bevy_app::{App, Plugin, PostUpdate, PreUpdate}; use bevy_ecs::prelude::Component; use bevy_ecs::{ reflect::AppTypeRegistry, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs, SystemSet}, + schedule::{IntoScheduleConfigs, SystemSet}, world::FromWorld, }; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bevy_reflect::{FromReflect, GetTypeRegistration, Reflect, TypePath}; use core::any::TypeId; use tracing::error; @@ -261,6 +261,33 @@ pub struct AssetPlugin { pub mode: AssetMode, /// How/If asset meta files should be checked. pub meta_check: AssetMetaCheck, + /// How to handle load requests of files that are outside the approved directories. + /// + /// Approved folders are [`AssetPlugin::file_path`] and the folder of each + /// [`AssetSource`](io::AssetSource). Subfolders within these folders are also valid. + pub unapproved_path_mode: UnapprovedPathMode, +} + +/// Determines how to react to attempts to load assets not inside the approved folders. + /// + /// Approved folders are [`AssetPlugin::file_path`] and the folder of each + /// [`AssetSource`](io::AssetSource). Subfolders within these folders are also valid. + /// + /// It is strongly discouraged to use [`Allow`](UnapprovedPathMode::Allow) if your + /// app will include scripts or modding support, as it could allow arbitrary file + /// access for malicious code. + /// + /// See [`AssetPath::is_unapproved`](crate::AssetPath::is_unapproved) +#[derive(Clone, Default)] +pub enum UnapprovedPathMode { + /// Unapproved asset loading is allowed. This is strongly discouraged. + Allow, + /// Fails to load any asset that is unapproved, unless an override method is used, like + /// [`AssetServer::load_override`]. + Deny, + /// Fails to load any asset that is unapproved. + #[default] + Forbid, } /// Controls whether or not assets are pre-processed before being loaded.
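// An illustrative sketch, not taken from the patch, of how the new
// `unapproved_path_mode` field and the `AssetServer::load_override` escape hatch
// introduced above might be used together. `ExampleAsset` and the function names
// are hypothetical stand-ins; a real app would also register an `AssetLoader`
// for the asset type being loaded.
use bevy_app::App;
use bevy_asset::{Asset, AssetPlugin, AssetServer, Handle, UnapprovedPathMode};
use bevy_ecs::system::Res;
use bevy_reflect::TypePath;

/// A placeholder asset type standing in for whatever asset is actually loaded.
#[derive(Asset, TypePath)]
struct ExampleAsset;

fn configure_asset_plugin(app: &mut App) {
    // `Deny` rejects loads whose paths escape the approved folders unless an
    // override method such as `load_override` is used; the default `Forbid`
    // rejects them unconditionally.
    app.add_plugins(AssetPlugin {
        unapproved_path_mode: UnapprovedPathMode::Deny,
        ..Default::default()
    });
}

fn load_sibling_file(asset_server: Res<AssetServer>) {
    // Under `Deny`, a plain `asset_server.load("../sibling.ron")` logs an error
    // and returns `Handle::default()`; `load_override` opts in explicitly.
    let _handle: Handle<ExampleAsset> = asset_server.load_override("../sibling.ron");
}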
@@ -314,6 +341,7 @@ impl Default for AssetPlugin { processed_file_path: Self::DEFAULT_PROCESSED_FILE_PATH.to_string(), watch_for_changes_override: None, meta_check: AssetMetaCheck::default(), + unapproved_path_mode: UnapprovedPathMode::default(), } } } @@ -357,6 +385,7 @@ impl Plugin for AssetPlugin { AssetServerMode::Unprocessed, self.meta_check.clone(), watch, + self.unapproved_path_mode.clone(), )); } AssetMode::Processed => { @@ -373,6 +402,7 @@ impl Plugin for AssetPlugin { AssetServerMode::Processed, AssetMetaCheck::Always, watch, + self.unapproved_path_mode.clone(), )) .insert_resource(processor) .add_systems(bevy_app::Startup, AssetProcessor::start); @@ -386,6 +416,7 @@ impl Plugin for AssetPlugin { AssetServerMode::Processed, AssetMetaCheck::Always, watch, + self.unapproved_path_mode.clone(), )); } } @@ -505,8 +536,8 @@ pub trait AssetApp { /// * Initializing the [`AssetEvent`] resource for the [`Asset`] /// * Adding other relevant systems and resources for the [`Asset`] /// * Ignoring schedule ambiguities in [`Assets`] resource. Any time a system takes - /// mutable access to this resource this causes a conflict, but they rarely actually - /// modify the same underlying asset. + /// mutable access to this resource this causes a conflict, but they rarely actually + /// modify the same underlying asset. fn init_asset(&mut self) -> &mut Self; /// Registers the asset type `T` using `[App::register]`, /// and adds [`ReflectAsset`] type data to `T` and [`ReflectHandle`] type data to [`Handle`] in the type registry. @@ -645,7 +676,7 @@ mod tests { }, loader::{AssetLoader, LoadContext}, Asset, AssetApp, AssetEvent, AssetId, AssetLoadError, AssetLoadFailedEvent, AssetPath, - AssetPlugin, AssetServer, Assets, + AssetPlugin, AssetServer, Assets, LoadState, UnapprovedPathMode, }; use alloc::{ boxed::Box, @@ -661,8 +692,7 @@ mod tests { prelude::*, schedule::{LogLevel, ScheduleBuildSettings}, }; - use bevy_log::LogPlugin; - use bevy_platform_support::collections::HashMap; + use bevy_platform::collections::HashMap; use bevy_reflect::TypePath; use core::time::Duration; use serde::{Deserialize, Serialize}; @@ -830,11 +860,7 @@ mod tests { AssetSourceId::Default, AssetSource::build().with_reader(move || Box::new(gated_memory_reader.clone())), ) - .add_plugins(( - TaskPoolPlugin::default(), - LogPlugin::default(), - AssetPlugin::default(), - )); + .add_plugins((TaskPoolPlugin::default(), AssetPlugin::default())); (app, gate_opener) } @@ -1732,11 +1758,7 @@ mod tests { "unstable", AssetSource::build().with_reader(move || Box::new(unstable_reader.clone())), ) - .add_plugins(( - TaskPoolPlugin::default(), - LogPlugin::default(), - AssetPlugin::default(), - )) + .add_plugins((TaskPoolPlugin::default(), AssetPlugin::default())) .init_asset::() .register_asset_loader(CoolTextLoader) .init_resource::() @@ -1784,6 +1806,79 @@ mod tests { app.world_mut().run_schedule(Update); } + // This test is not checking a requirement, but documenting a current limitation. We simply are + // not capable of loading subassets when doing nested immediate loads. 
+ #[test] + fn error_on_nested_immediate_load_of_subasset() { + let mut app = App::new(); + + let dir = Dir::default(); + dir.insert_asset_text( + Path::new("a.cool.ron"), + r#"( + text: "b", + dependencies: [], + embedded_dependencies: [], + sub_texts: ["A"], +)"#, + ); + dir.insert_asset_text(Path::new("empty.txt"), ""); + + app.register_asset_source( + AssetSourceId::Default, + AssetSource::build() + .with_reader(move || Box::new(MemoryAssetReader { root: dir.clone() })), + ) + .add_plugins((TaskPoolPlugin::default(), AssetPlugin::default())); + + app.init_asset::() + .init_asset::() + .register_asset_loader(CoolTextLoader); + + struct NestedLoadOfSubassetLoader; + + impl AssetLoader for NestedLoadOfSubassetLoader { + type Asset = TestAsset; + type Error = crate::loader::LoadDirectError; + type Settings = (); + + async fn load( + &self, + _: &mut dyn Reader, + _: &Self::Settings, + load_context: &mut LoadContext<'_>, + ) -> Result { + // We expect this load to fail. + load_context + .loader() + .immediate() + .load::("a.cool.ron#A") + .await?; + Ok(TestAsset) + } + + fn extensions(&self) -> &[&str] { + &["txt"] + } + } + + app.init_asset::() + .register_asset_loader(NestedLoadOfSubassetLoader); + + let asset_server = app.world().resource::().clone(); + let handle = asset_server.load::("empty.txt"); + + run_app_until(&mut app, |_world| match asset_server.load_state(&handle) { + LoadState::Loading => None, + LoadState::Failed(err) => { + let error_message = format!("{err}"); + assert!(error_message.contains("Requested to load an asset path (a.cool.ron#A) with a subasset, but this is unsupported"), "what? \"{error_message}\""); + Some(()) + } + state => panic!("Unexpected asset state: {state:?}"), + }); + } + // validate the Asset derive macro for various asset types #[derive(Asset, TypePath)] pub struct TestAsset; @@ -1817,4 +1912,91 @@ mod tests { #[derive(Asset, TypePath)] pub struct TupleTestAsset(#[dependency] Handle); + + fn unapproved_path_setup(mode: UnapprovedPathMode) -> App { + let dir = Dir::default(); + let a_path = "../a.cool.ron"; + let a_ron = r#" +( + text: "a", + dependencies: [], + embedded_dependencies: [], + sub_texts: [], +)"#; + + dir.insert_asset_text(Path::new(a_path), a_ron); + + let mut app = App::new(); + let memory_reader = MemoryAssetReader { root: dir }; + app.register_asset_source( + AssetSourceId::Default, + AssetSource::build().with_reader(move || Box::new(memory_reader.clone())), + ) + .add_plugins(( + TaskPoolPlugin::default(), + AssetPlugin { + unapproved_path_mode: mode, + ..Default::default() + }, + )); + app.init_asset::(); + + app + } + + fn load_a_asset(assets: Res) { + let a = assets.load::("../a.cool.ron"); + if a == Handle::default() { + panic!() + } + } + + fn load_a_asset_override(assets: Res) { + let a = assets.load_override::("../a.cool.ron"); + if a == Handle::default() { + panic!() + } + } + + #[test] + #[should_panic] + fn unapproved_path_forbid_should_panic() { + let mut app = unapproved_path_setup(UnapprovedPathMode::Forbid); + + fn uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset_override)); + + app.world_mut().run_schedule(Update); + } + + #[test] + #[should_panic] + fn unapproved_path_deny_should_panic() { + let mut app = unapproved_path_setup(UnapprovedPathMode::Deny); + + fn uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset)); + + app.world_mut().run_schedule(Update); + } + + #[test] + fn unapproved_path_deny_should_finish() { + let mut app = 
unapproved_path_setup(UnapprovedPathMode::Deny); + + fn uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset_override)); + + app.world_mut().run_schedule(Update); + } + + #[test] + fn unapproved_path_allow_should_finish() { + let mut app = unapproved_path_setup(UnapprovedPathMode::Allow); + + fn uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset)); + + app.world_mut().run_schedule(Update); + } } diff --git a/crates/bevy_asset/src/loader.rs b/crates/bevy_asset/src/loader.rs index 75192159a8..8f4863b885 100644 --- a/crates/bevy_asset/src/loader.rs +++ b/crates/bevy_asset/src/loader.rs @@ -13,7 +13,7 @@ use alloc::{ }; use atomicow::CowArc; use bevy_ecs::world::World; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_tasks::{BoxedFuture, ConditionalSendFuture}; use core::any::{Any, TypeId}; use downcast_rs::{impl_downcast, Downcast}; @@ -296,10 +296,14 @@ impl AssetContainer for A { /// [`NestedLoader::load`]: crate::NestedLoader::load /// [immediately]: crate::Immediate #[derive(Error, Debug)] -#[error("Failed to load dependency {dependency:?} {error}")] -pub struct LoadDirectError { - pub dependency: AssetPath<'static>, - pub error: AssetLoadError, +pub enum LoadDirectError { + #[error("Requested to load an asset path ({0:?}) with a subasset, but this is unsupported. See issue #18291")] + RequestedSubasset(AssetPath<'static>), + #[error("Failed to load dependency {dependency:?} {error}")] + LoadError { + dependency: AssetPath<'static>, + error: AssetLoadError, + }, } /// An error that occurs while deserializing [`AssetMeta`]. @@ -477,8 +481,8 @@ impl<'a> LoadContext<'a> { let path = path.into(); let source = self.asset_server.get_source(path.source())?; let asset_reader = match self.asset_server.mode() { - AssetServerMode::Unprocessed { .. } => source.reader(), - AssetServerMode::Processed { .. 
} => source.processed_reader()?, + AssetServerMode::Unprocessed => source.reader(), + AssetServerMode::Processed => source.processed_reader()?, }; let mut reader = asset_reader.read(path.path()).await?; let hash = if self.populate_hashes { @@ -537,7 +541,7 @@ impl<'a> LoadContext<'a> { self.populate_hashes, ) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error, })?; diff --git a/crates/bevy_asset/src/loader_builders.rs b/crates/bevy_asset/src/loader_builders.rs index 3d75027f5d..13bea2b71d 100644 --- a/crates/bevy_asset/src/loader_builders.rs +++ b/crates/bevy_asset/src/loader_builders.rs @@ -305,9 +305,12 @@ impl NestedLoader<'_, '_, StaticTyped, Deferred> { pub fn load<'c, A: Asset>(self, path: impl Into>) -> Handle { let path = path.into().to_owned(); let handle = if self.load_context.should_load_dependencies { - self.load_context - .asset_server - .load_with_meta_transform(path, self.meta_transform, ()) + self.load_context.asset_server.load_with_meta_transform( + path, + self.meta_transform, + (), + true, + ) } else { self.load_context .asset_server @@ -387,13 +390,16 @@ impl<'builder, 'reader, T> NestedLoader<'_, '_, T, Immediate<'builder, 'reader>> path: &AssetPath<'static>, asset_type_id: Option, ) -> Result<(Arc, ErasedLoadedAsset), LoadDirectError> { + if path.label().is_some() { + return Err(LoadDirectError::RequestedSubasset(path.clone())); + } let (mut meta, loader, mut reader) = if let Some(reader) = self.mode.reader { let loader = if let Some(asset_type_id) = asset_type_id { self.load_context .asset_server .get_asset_loader_with_asset_type_id(asset_type_id) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error: error.into(), })? @@ -402,7 +408,7 @@ impl<'builder, 'reader, T> NestedLoader<'_, '_, T, Immediate<'builder, 'reader>> .asset_server .get_path_asset_loader(path) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error: error.into(), })? @@ -415,7 +421,7 @@ impl<'builder, 'reader, T> NestedLoader<'_, '_, T, Immediate<'builder, 'reader>> .asset_server .get_meta_loader_and_reader(path, asset_type_id) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error, })?; @@ -453,15 +459,17 @@ impl NestedLoader<'_, '_, StaticTyped, Immediate<'_, '_>> { self.load_internal(&path, Some(TypeId::of::())) .await .and_then(move |(loader, untyped_asset)| { - untyped_asset.downcast::().map_err(|_| LoadDirectError { - dependency: path.clone(), - error: AssetLoadError::RequestedHandleTypeMismatch { - path, - requested: TypeId::of::(), - actual_asset_name: loader.asset_type_name(), - loader_name: loader.type_name(), - }, - }) + untyped_asset + .downcast::() + .map_err(|_| LoadDirectError::LoadError { + dependency: path.clone(), + error: AssetLoadError::RequestedHandleTypeMismatch { + path, + requested: TypeId::of::(), + actual_asset_name: loader.asset_type_name(), + loader_name: loader.type_name(), + }, + }) }) } } diff --git a/crates/bevy_asset/src/path.rs b/crates/bevy_asset/src/path.rs index 3038c52fba..ad127812dc 100644 --- a/crates/bevy_asset/src/path.rs +++ b/crates/bevy_asset/src/path.rs @@ -18,10 +18,10 @@ use thiserror::Error; /// /// Asset paths consist of three main parts: /// * [`AssetPath::source`]: The name of the [`AssetSource`](crate::io::AssetSource) to load the asset from. -/// This is optional. 
If one is not set the default source will be used (which is the `assets` folder by default). +/// This is optional. If one is not set the default source will be used (which is the `assets` folder by default). /// * [`AssetPath::path`]: The "virtual filesystem path" pointing to an asset source file. /// * [`AssetPath::label`]: An optional "named sub asset". When assets are loaded, they are -/// allowed to load "sub assets" of any type, which are identified by a named "label". +/// allowed to load "sub assets" of any type, which are identified by a named "label". /// /// Asset paths are generally constructed (and visualized) as strings: /// @@ -53,7 +53,7 @@ use thiserror::Error; /// This also means that you should use [`AssetPath::parse`] in cases where `&str` is the explicit type. #[derive(Eq, PartialEq, Hash, Clone, Default, Reflect)] #[reflect(opaque)] -#[reflect(Debug, PartialEq, Hash, Serialize, Deserialize)] +#[reflect(Debug, PartialEq, Hash, Clone, Serialize, Deserialize)] pub struct AssetPath<'a> { source: AssetSourceId<'a>, path: CowArc<'a, Path>, @@ -478,6 +478,51 @@ impl<'a> AssetPath<'a> { } }) } + + /// Returns `true` if this [`AssetPath`] points to a file that is + /// outside of it's [`AssetSource`](crate::io::AssetSource) folder. + /// + /// ## Example + /// ``` + /// # use bevy_asset::AssetPath; + /// // Inside the default AssetSource. + /// let path = AssetPath::parse("thingy.png"); + /// assert!( ! path.is_unapproved()); + /// let path = AssetPath::parse("gui/thingy.png"); + /// assert!( ! path.is_unapproved()); + /// + /// // Inside a different AssetSource. + /// let path = AssetPath::parse("embedded://thingy.png"); + /// assert!( ! path.is_unapproved()); + /// + /// // Exits the `AssetSource`s directory. + /// let path = AssetPath::parse("../thingy.png"); + /// assert!(path.is_unapproved()); + /// let path = AssetPath::parse("folder/../../thingy.png"); + /// assert!(path.is_unapproved()); + /// + /// // This references the linux root directory. + /// let path = AssetPath::parse("/home/thingy.png"); + /// assert!(path.is_unapproved()); + /// ``` + pub fn is_unapproved(&self) -> bool { + use std::path::Component; + let mut simplified = PathBuf::new(); + for component in self.path.components() { + match component { + Component::Prefix(_) | Component::RootDir => return true, + Component::CurDir => {} + Component::ParentDir => { + if !simplified.pop() { + return true; + } + } + Component::Normal(os_str) => simplified.push(os_str), + } + } + + false + } } impl AssetPath<'static> { diff --git a/crates/bevy_asset/src/processor/log.rs b/crates/bevy_asset/src/processor/log.rs index 533a87d830..f4a0f81862 100644 --- a/crates/bevy_asset/src/processor/log.rs +++ b/crates/bevy_asset/src/processor/log.rs @@ -5,7 +5,7 @@ use alloc::{ vec::Vec, }; use async_fs::File; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use futures_lite::{AsyncReadExt, AsyncWriteExt}; use std::path::PathBuf; use thiserror::Error; @@ -32,8 +32,10 @@ pub struct ProcessorTransactionLog { /// An error that occurs when reading from the [`ProcessorTransactionLog`] fails. #[derive(Error, Debug)] pub enum ReadLogError { + /// An invalid log line was encountered, consisting of the contained string. #[error("Encountered an invalid log line: '{0}'")] InvalidLine(String), + /// A file-system-based error occurred while reading the log file. 
#[error("Failed to read log file: {0}")] Io(#[from] futures_io::Error), } @@ -51,10 +53,13 @@ pub struct WriteLogError { /// An error that occurs when validating the [`ProcessorTransactionLog`] fails. #[derive(Error, Debug)] pub enum ValidateLogError { + /// An error that could not be recovered from. All assets will be reprocessed. #[error("Encountered an unrecoverable error. All assets will be reprocessed.")] UnrecoverableError, + /// A [`ReadLogError`]. #[error(transparent)] ReadLogError(#[from] ReadLogError), + /// Duplicated process asset transactions occurred. #[error("Encountered a duplicate process asset transaction: {0:?}")] EntryErrors(Vec), } @@ -62,10 +67,13 @@ pub enum ValidateLogError { /// An error that occurs when validating individual [`ProcessorTransactionLog`] entries. #[derive(Error, Debug)] pub enum LogEntryError { + /// A duplicate process asset transaction occurred for the given asset path. #[error("Encountered a duplicate process asset transaction: {0}")] DuplicateTransaction(AssetPath<'static>), + /// A transaction was ended that never started for the given asset path. #[error("A transaction was ended that never started {0}")] EndedMissingTransaction(AssetPath<'static>), + /// An asset started processing but never finished at the given asset path. #[error("An asset started processing but never finished: {0}")] UnfinishedTransaction(AssetPath<'static>), } diff --git a/crates/bevy_asset/src/processor/mod.rs b/crates/bevy_asset/src/processor/mod.rs index bfc9a295d8..a239d66a9b 100644 --- a/crates/bevy_asset/src/processor/mod.rs +++ b/crates/bevy_asset/src/processor/mod.rs @@ -54,11 +54,11 @@ use crate::{ AssetMetaDyn, AssetMetaMinimal, ProcessedInfo, ProcessedInfoMinimal, }, AssetLoadError, AssetMetaCheck, AssetPath, AssetServer, AssetServerMode, DeserializeMetaError, - MissingAssetLoaderForExtensionError, + MissingAssetLoaderForExtensionError, UnapprovedPathMode, WriteDefaultMetaError, }; use alloc::{borrow::ToOwned, boxed::Box, collections::VecDeque, sync::Arc, vec, vec::Vec}; use bevy_ecs::prelude::*; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_tasks::IoTaskPool; use futures_io::ErrorKind; use futures_lite::{AsyncReadExt, AsyncWriteExt, StreamExt}; @@ -122,6 +122,7 @@ impl AssetProcessor { AssetServerMode::Processed, AssetMetaCheck::Always, false, + UnapprovedPathMode::default(), ); Self { server, data } } @@ -207,10 +208,13 @@ impl AssetProcessor { /// Processes all assets. This will: /// * For each "processed [`AssetSource`]: /// * Scan the [`ProcessorTransactionLog`] and recover from any failures detected - /// * Scan the processed [`AssetReader`](crate::io::AssetReader) to build the current view of already processed assets. - /// * Scan the unprocessed [`AssetReader`](crate::io::AssetReader) and remove any final processed assets that are invalid or no longer exist. - /// * For each asset in the unprocessed [`AssetReader`](crate::io::AssetReader), kick off a new "process job", which will process the asset - /// (if the latest version of the asset has not been processed). + /// * Scan the processed [`AssetReader`](crate::io::AssetReader) to build the current view of + /// already processed assets. + /// * Scan the unprocessed [`AssetReader`](crate::io::AssetReader) and remove any final + /// processed assets that are invalid or no longer exist. 
+ /// * For each asset in the unprocessed [`AssetReader`](crate::io::AssetReader), kick off a new + /// "process job", which will process the asset + /// (if the latest version of the asset has not been processed). #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] pub fn process_assets(&self) { let start_time = std::time::Instant::now(); @@ -258,6 +262,58 @@ impl AssetProcessor { } } + /// Writes the default meta file for the provided `path`. + /// + /// This function generates the appropriate meta file to process `path` with the default + /// processor. If there is no default processor, it falls back to the default loader. + /// + /// Note if there is already a meta file for `path`, this function returns + /// `Err(WriteDefaultMetaError::MetaAlreadyExists)`. + pub async fn write_default_meta_file_for_path( + &self, + path: impl Into>, + ) -> Result<(), WriteDefaultMetaError> { + let path = path.into(); + let Some(processor) = path + .get_full_extension() + .and_then(|extension| self.get_default_processor(&extension)) + else { + return self + .server + .write_default_loader_meta_file_for_path(path) + .await; + }; + + let meta = processor.default_meta(); + let serialized_meta = meta.serialize(); + + let source = self.get_source(path.source())?; + + // Note: we get the reader rather than the processed reader, since we want to write the meta + // file for the unprocessed version of that asset (so it will be processed by the default + // processor). + let reader = source.reader(); + match reader.read_meta_bytes(path.path()).await { + Ok(_) => return Err(WriteDefaultMetaError::MetaAlreadyExists), + Err(AssetReaderError::NotFound(_)) => { + // The meta file couldn't be found so just fall through. + } + Err(AssetReaderError::Io(err)) => { + return Err(WriteDefaultMetaError::IoErrorFromExistingMetaCheck(err)) + } + Err(AssetReaderError::HttpError(err)) => { + return Err(WriteDefaultMetaError::HttpErrorFromExistingMetaCheck(err)) + } + } + + let writer = source.writer()?; + writer + .write_meta_bytes(path.path(), &serialized_meta) + .await?; + + Ok(()) + } + async fn handle_asset_source_event(&self, source: &AssetSource, event: AssetSourceEvent) { trace!("{event:?}"); match event { @@ -800,12 +856,6 @@ impl AssetProcessor { } }; let meta_bytes = meta.serialize(); - // write meta to source location if it doesn't already exist - source - .writer()? - .write_meta_bytes(path, &meta_bytes) - .await - .map_err(writer_err)?; (meta, meta_bytes, processor) } Err(err) => { diff --git a/crates/bevy_asset/src/render_asset.rs b/crates/bevy_asset/src/render_asset.rs index 3bbc3dfd48..583ee45457 100644 --- a/crates/bevy_asset/src/render_asset.rs +++ b/crates/bevy_asset/src/render_asset.rs @@ -27,9 +27,11 @@ bitflags::bitflags! { #[repr(transparent)] #[derive(Serialize, Deserialize, Hash, Clone, Copy, PartialEq, Eq, Debug, Reflect)] #[reflect(opaque)] - #[reflect(Serialize, Deserialize, Hash, PartialEq, Debug)] + #[reflect(Serialize, Deserialize, Hash, Clone, PartialEq, Debug)] pub struct RenderAssetUsages: u8 { + /// The bit flag for the main world. const MAIN_WORLD = 1 << 0; + /// The bit flag for the render world. 
const RENDER_WORLD = 1 << 1; } } diff --git a/crates/bevy_asset/src/saver.rs b/crates/bevy_asset/src/saver.rs index c8b308a544..c8b96012ee 100644 --- a/crates/bevy_asset/src/saver.rs +++ b/crates/bevy_asset/src/saver.rs @@ -4,7 +4,7 @@ use crate::{ }; use alloc::boxed::Box; use atomicow::CowArc; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_tasks::{BoxedFuture, ConditionalSendFuture}; use core::{borrow::Borrow, hash::Hash, ops::Deref}; use serde::{Deserialize, Serialize}; diff --git a/crates/bevy_asset/src/server/info.rs b/crates/bevy_asset/src/server/info.rs index e519bf86ae..1b3bb3cb65 100644 --- a/crates/bevy_asset/src/server/info.rs +++ b/crates/bevy_asset/src/server/info.rs @@ -11,7 +11,7 @@ use alloc::{ vec::Vec, }; use bevy_ecs::world::World; -use bevy_platform_support::collections::{hash_map::Entry, HashMap, HashSet}; +use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; use bevy_tasks::Task; use bevy_utils::TypeIdMap; use core::{any::TypeId, task::Waker}; @@ -347,14 +347,9 @@ impl AssetInfos { /// Returns `true` if the asset this path points to is still alive pub(crate) fn is_path_alive<'a>(&self, path: impl Into>) -> bool { - let path = path.into(); - - let result = self - .get_path_ids(&path) + self.get_path_ids(&path.into()) .filter_map(|id| self.infos.get(&id)) - .any(|info| info.weak_handle.strong_count() > 0); - - result + .any(|info| info.weak_handle.strong_count() > 0) } /// Returns `true` if the asset at this path should be reloaded diff --git a/crates/bevy_asset/src/server/loaders.rs b/crates/bevy_asset/src/server/loaders.rs index 1250ff666a..08384e9efe 100644 --- a/crates/bevy_asset/src/server/loaders.rs +++ b/crates/bevy_asset/src/server/loaders.rs @@ -4,7 +4,7 @@ use crate::{ }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use async_broadcast::RecvError; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_tasks::IoTaskPool; use bevy_utils::TypeIdMap; use core::any::TypeId; diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index 0a5c59c629..ff5800474d 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -5,7 +5,8 @@ use crate::{ folder::LoadedFolder, io::{ AssetReaderError, AssetSource, AssetSourceEvent, AssetSourceId, AssetSources, - ErasedAssetReader, MissingAssetSourceError, MissingProcessedAssetReaderError, Reader, + AssetWriterError, ErasedAssetReader, MissingAssetSourceError, MissingAssetWriterError, + MissingProcessedAssetReaderError, Reader, }, loader::{AssetLoader, ErasedAssetLoader, LoadContext, LoadedAsset}, meta::{ @@ -14,8 +15,8 @@ use crate::{ }, path::AssetPath, Asset, AssetEvent, AssetHandleProvider, AssetId, AssetLoadFailedEvent, AssetMetaCheck, Assets, - DeserializeMetaError, ErasedLoadedAsset, Handle, LoadedUntypedAsset, UntypedAssetId, - UntypedAssetLoadFailedEvent, UntypedHandle, + DeserializeMetaError, ErasedLoadedAsset, Handle, LoadedUntypedAsset, UnapprovedPathMode, + UntypedAssetId, UntypedAssetLoadFailedEvent, UntypedHandle, }; use alloc::{borrow::ToOwned, boxed::Box, vec, vec::Vec}; use alloc::{ @@ -25,7 +26,7 @@ use alloc::{ }; use atomicow::CowArc; use bevy_ecs::prelude::*; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bevy_tasks::IoTaskPool; use core::{any::TypeId, future::Future, panic::AssertUnwindSafe, task::Poll}; use crossbeam_channel::{Receiver, Sender}; @@ -38,12 +39,13 @@ use 
std::path::{Path, PathBuf}; use thiserror::Error; use tracing::{error, info}; -/// Loads and tracks the state of [`Asset`] values from a configured [`AssetReader`](crate::io::AssetReader). This can be used to kick off new asset loads and -/// retrieve their current load states. +/// Loads and tracks the state of [`Asset`] values from a configured [`AssetReader`](crate::io::AssetReader). +/// This can be used to kick off new asset loads and retrieve their current load states. /// /// The general process to load an asset is: -/// 1. Initialize a new [`Asset`] type with the [`AssetServer`] via [`AssetApp::init_asset`], which will internally call [`AssetServer::register_asset`] -/// and set up related ECS [`Assets`] storage and systems. +/// 1. Initialize a new [`Asset`] type with the [`AssetServer`] via [`AssetApp::init_asset`], which +/// will internally call [`AssetServer::register_asset`] and set up related ECS [`Assets`] +/// storage and systems. /// 2. Register one or more [`AssetLoader`]s for that asset with [`AssetApp::init_asset_loader`] /// 3. Add the asset to your asset folder (defaults to `assets`). /// 4. Call [`AssetServer::load`] with a path to your asset. @@ -66,6 +68,7 @@ pub(crate) struct AssetServerData { sources: AssetSources, mode: AssetServerMode, meta_check: AssetMetaCheck, + unapproved_path_mode: UnapprovedPathMode, } /// The "asset mode" the server is currently in. @@ -80,13 +83,19 @@ pub enum AssetServerMode { impl AssetServer { /// Create a new instance of [`AssetServer`]. If `watch_for_changes` is true, the [`AssetReader`](crate::io::AssetReader) storage will watch for changes to /// asset sources and hot-reload them. - pub fn new(sources: AssetSources, mode: AssetServerMode, watching_for_changes: bool) -> Self { + pub fn new( + sources: AssetSources, + mode: AssetServerMode, + watching_for_changes: bool, + unapproved_path_mode: UnapprovedPathMode, + ) -> Self { Self::new_with_loaders( sources, Default::default(), mode, AssetMetaCheck::Always, watching_for_changes, + unapproved_path_mode, ) } @@ -97,6 +106,7 @@ impl AssetServer { mode: AssetServerMode, meta_check: AssetMetaCheck, watching_for_changes: bool, + unapproved_path_mode: UnapprovedPathMode, ) -> Self { Self::new_with_loaders( sources, @@ -104,6 +114,7 @@ impl AssetServer { mode, meta_check, watching_for_changes, + unapproved_path_mode, ) } @@ -113,6 +124,7 @@ impl AssetServer { mode: AssetServerMode, meta_check: AssetMetaCheck, watching_for_changes: bool, + unapproved_path_mode: UnapprovedPathMode, ) -> Self { let (asset_event_sender, asset_event_receiver) = crossbeam_channel::unbounded(); let mut infos = AssetInfos::default(); @@ -126,6 +138,7 @@ impl AssetServer { asset_event_receiver, loaders, infos: RwLock::new(infos), + unapproved_path_mode, }), } } @@ -309,7 +322,16 @@ impl AssetServer { /// The asset load will fail and an error will be printed to the logs if the asset stored at `path` is not of type `A`. #[must_use = "not using the returned strong handle may result in the unexpected release of the asset"] pub fn load<'a, A: Asset>(&self, path: impl Into>) -> Handle { - self.load_with_meta_transform(path, None, ()) + self.load_with_meta_transform(path, None, (), false) + } + + /// Same as [`load`](AssetServer::load), but you can load assets from unaproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny). 
+ /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_override<'a, A: Asset>(&self, path: impl Into>) -> Handle { + self.load_with_meta_transform(path, None, (), true) } /// Begins loading an [`Asset`] of type `A` stored at `path` while holding a guard item. @@ -333,7 +355,20 @@ impl AssetServer { path: impl Into>, guard: G, ) -> Handle { - self.load_with_meta_transform(path, None, guard) + self.load_with_meta_transform(path, None, guard, false) + } + + /// Same as [`load`](AssetServer::load_acquire), but you can load assets from unaproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny). + /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_acquire_override<'a, A: Asset, G: Send + Sync + 'static>( + &self, + path: impl Into>, + guard: G, + ) -> Handle { + self.load_with_meta_transform(path, None, guard, true) } /// Begins loading an [`Asset`] of type `A` stored at `path`. The given `settings` function will override the asset's @@ -345,7 +380,30 @@ impl AssetServer { path: impl Into>, settings: impl Fn(&mut S) + Send + Sync + 'static, ) -> Handle { - self.load_with_meta_transform(path, Some(loader_settings_meta_transform(settings)), ()) + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + (), + false, + ) + } + + /// Same as [`load`](AssetServer::load_with_settings), but you can load assets from unaproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny). + /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_with_settings_override<'a, A: Asset, S: Settings>( + &self, + path: impl Into>, + settings: impl Fn(&mut S) + Send + Sync + 'static, + ) -> Handle { + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + (), + true, + ) } /// Begins loading an [`Asset`] of type `A` stored at `path` while holding a guard item. @@ -364,7 +422,36 @@ impl AssetServer { settings: impl Fn(&mut S) + Send + Sync + 'static, guard: G, ) -> Handle { - self.load_with_meta_transform(path, Some(loader_settings_meta_transform(settings)), guard) + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + guard, + false, + ) + } + + /// Same as [`load`](AssetServer::load_acquire_with_settings), but you can load assets from unaproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny). 
+ /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_acquire_with_settings_override< + 'a, + A: Asset, + S: Settings, + G: Send + Sync + 'static, + >( + &self, + path: impl Into>, + settings: impl Fn(&mut S) + Send + Sync + 'static, + guard: G, + ) -> Handle { + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + guard, + true, + ) } pub(crate) fn load_with_meta_transform<'a, A: Asset, G: Send + Sync + 'static>( @@ -372,8 +459,20 @@ impl AssetServer { path: impl Into>, meta_transform: Option, guard: G, + override_unapproved: bool, ) -> Handle { let path = path.into().into_owned(); + + if path.is_unapproved() { + match (&self.data.unapproved_path_mode, override_unapproved) { + (UnapprovedPathMode::Allow, _) | (UnapprovedPathMode::Deny, true) => {} + (UnapprovedPathMode::Deny, false) | (UnapprovedPathMode::Forbid, _) => { + error!("Asset path {path} is unapproved. See UnapprovedPathMode for details."); + return Handle::default(); + } + } + } + let mut infos = self.data.infos.write(); let (handle, should_load) = infos.get_or_create_path_handle::( path.clone(), @@ -917,8 +1016,8 @@ impl AssetServer { }; let asset_reader = match server.data.mode { - AssetServerMode::Unprocessed { .. } => source.reader(), - AssetServerMode::Processed { .. } => match source.processed_reader() { + AssetServerMode::Unprocessed => source.reader(), + AssetServerMode::Processed => match source.processed_reader() { Ok(reader) => reader, Err(_) => { error!( @@ -1229,8 +1328,8 @@ impl AssetServer { // Then the meta reader, if meta exists, will correspond to the meta for the current "version" of the asset. // See ProcessedAssetInfo::file_transaction_lock for more context let asset_reader = match self.data.mode { - AssetServerMode::Unprocessed { .. } => source.reader(), - AssetServerMode::Processed { .. } => source.processed_reader()?, + AssetServerMode::Unprocessed => source.reader(), + AssetServerMode::Processed => source.processed_reader()?, }; let reader = asset_reader.read(asset_path.path()).await?; let read_meta = match &self.data.meta_check { @@ -1472,6 +1571,50 @@ impl AssetServer { } } } + + /// Writes the default loader meta file for the provided `path`. + /// + /// This function only generates meta files that simply load the path directly. To generate a + /// meta file that will use the default asset processor for the path, see + /// [`AssetProcessor::write_default_meta_file_for_path`]. + /// + /// Note if there is already a meta file for `path`, this function returns + /// `Err(WriteDefaultMetaError::MetaAlreadyExists)`. + /// + /// [`AssetProcessor::write_default_meta_file_for_path`]: crate::AssetProcessor::write_default_meta_file_for_path + pub async fn write_default_loader_meta_file_for_path( + &self, + path: impl Into>, + ) -> Result<(), WriteDefaultMetaError> { + let path = path.into(); + let loader = self.get_path_asset_loader(&path).await?; + + let meta = loader.default_meta(); + let serialized_meta = meta.serialize(); + + let source = self.get_source(path.source())?; + + let reader = source.reader(); + match reader.read_meta_bytes(path.path()).await { + Ok(_) => return Err(WriteDefaultMetaError::MetaAlreadyExists), + Err(AssetReaderError::NotFound(_)) => { + // The meta file couldn't be found so just fall through. 
+ } + Err(AssetReaderError::Io(err)) => { + return Err(WriteDefaultMetaError::IoErrorFromExistingMetaCheck(err)) + } + Err(AssetReaderError::HttpError(err)) => { + return Err(WriteDefaultMetaError::HttpErrorFromExistingMetaCheck(err)) + } + } + + let writer = source.writer()?; + writer + .write_meta_bytes(path.path(), &serialized_meta) + .await?; + + Ok(()) + } } /// A system that manages internal [`AssetServer`] events, such as finalizing asset loads. @@ -1578,14 +1721,14 @@ pub fn handle_internal_asset_events(world: &mut World) { for source in server.data.sources.iter() { match server.data.mode { - AssetServerMode::Unprocessed { .. } => { + AssetServerMode::Unprocessed => { if let Some(receiver) = source.event_receiver() { for event in receiver.try_iter() { handle_event(source.id(), event); } } } - AssetServerMode::Processed { .. } => { + AssetServerMode::Processed => { if let Some(receiver) = source.processed_event_receiver() { for event in receiver.try_iter() { handle_event(source.id(), event); @@ -1731,6 +1874,10 @@ impl RecursiveDependencyLoadState { /// An error that occurs during an [`Asset`] load. #[derive(Error, Debug, Clone)] +#[expect( + missing_docs, + reason = "Adding docs to the variants would not add information beyond the error message and the names" +)] pub enum AssetLoadError { #[error("Requested handle of type {requested:?} for asset '{path}' does not match actual asset type '{actual_asset_name}', which used loader '{loader_name}'")] RequestedHandleTypeMismatch { @@ -1792,6 +1939,7 @@ pub enum AssetLoadError { }, } +/// An error that can occur during asset loading. #[derive(Error, Debug, Clone)] #[error("Failed to load asset '{path}' with asset loader '{loader_name}': {error}")] pub struct AssetLoaderError { @@ -1801,11 +1949,13 @@ pub struct AssetLoaderError { } impl AssetLoaderError { + /// The path of the asset that failed to load. pub fn path(&self) -> &AssetPath<'static> { &self.path } } +/// An error that occurs while resolving an asset added by `add_async`. #[derive(Error, Debug, Clone)] #[error("An error occurred while resolving an asset added by `add_async`: {error}")] pub struct AddAsyncError { @@ -1823,13 +1973,15 @@ pub struct MissingAssetLoaderForExtensionError { #[derive(Error, Debug, Clone, PartialEq, Eq)] #[error("no `AssetLoader` found with the name '{type_name}'")] pub struct MissingAssetLoaderForTypeNameError { - type_name: String, + /// The type name that was not found. + pub type_name: String, } /// An error that occurs when an [`AssetLoader`] is not registered for a given [`Asset`] [`TypeId`]. #[derive(Error, Debug, Clone, PartialEq, Eq)] #[error("no `AssetLoader` found with the ID '{type_id:?}'")] pub struct MissingAssetLoaderForTypeIdError { + /// The type ID that was not found. pub type_id: TypeId, } @@ -1860,10 +2012,31 @@ const UNTYPED_SOURCE_SUFFIX: &str = "--untyped"; /// An error when attempting to wait asynchronously for an [`Asset`] to load. #[derive(Error, Debug, Clone)] pub enum WaitForAssetError { + /// The asset is not being loaded; waiting for it is meaningless. #[error("tried to wait for an asset that is not being loaded")] NotLoaded, + /// The asset failed to load. #[error(transparent)] Failed(Arc), + /// A dependency of the asset failed to load. 
#[error(transparent)] DependencyFailed(Arc), } + +#[derive(Error, Debug)] +pub enum WriteDefaultMetaError { + #[error(transparent)] + MissingAssetLoader(#[from] MissingAssetLoaderForExtensionError), + #[error(transparent)] + MissingAssetSource(#[from] MissingAssetSourceError), + #[error(transparent)] + MissingAssetWriter(#[from] MissingAssetWriterError), + #[error("failed to write default asset meta file: {0}")] + FailedToWriteMeta(#[from] AssetWriterError), + #[error("asset meta file already exists, so avoiding overwrite")] + MetaAlreadyExists, + #[error("encountered an I/O error while reading the existing meta file: {0}")] + IoErrorFromExistingMetaCheck(Arc), + #[error("encountered HTTP status {0} when reading the existing meta file")] + HttpErrorFromExistingMetaCheck(u16), +} diff --git a/crates/bevy_asset/src/transformer.rs b/crates/bevy_asset/src/transformer.rs index 8b2a8d09be..802e3aeaa7 100644 --- a/crates/bevy_asset/src/transformer.rs +++ b/crates/bevy_asset/src/transformer.rs @@ -1,7 +1,7 @@ use crate::{meta::Settings, Asset, ErasedLoadedAsset, Handle, LabeledAsset, UntypedHandle}; use alloc::boxed::Box; use atomicow::CowArc; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_tasks::ConditionalSendFuture; use core::{ borrow::Borrow, @@ -254,6 +254,7 @@ pub struct IdentityAssetTransformer { } impl IdentityAssetTransformer { + /// Creates a new [`IdentityAssetTransformer`] with the correct internal [`PhantomData`] field. pub const fn new() -> Self { Self { _phantom: PhantomData, diff --git a/crates/bevy_audio/Cargo.toml b/crates/bevy_audio/Cargo.toml index 4d7967977e..84060fe26b 100644 --- a/crates/bevy_audio/Cargo.toml +++ b/crates/bevy_audio/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_audio" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides audio functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -14,9 +14,7 @@ bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } @@ -28,9 +26,16 @@ tracing = { version = "0.1", default-features = false, features = ["std"] } cpal = { version = "0.15", optional = true } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. rodio = { version = "0.20", default-features = false, features = [ "wasm-bindgen", ] } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } [features] mp3 = ["rodio/mp3"] diff --git a/crates/bevy_audio/src/audio.rs b/crates/bevy_audio/src/audio.rs index 1d0149381b..349cf6b6a4 100644 --- a/crates/bevy_audio/src/audio.rs +++ b/crates/bevy_audio/src/audio.rs @@ -6,6 +6,7 @@ use bevy_reflect::prelude::*; /// The way Bevy manages the sound playback. 
#[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub enum PlaybackMode { /// Play the sound once. Do nothing when it ends. /// @@ -29,7 +30,7 @@ pub enum PlaybackMode { /// [`AudioSink`][crate::AudioSink] or [`SpatialAudioSink`][crate::SpatialAudioSink] /// components. Changes to this component will *not* be applied to already-playing audio. #[derive(Component, Clone, Copy, Debug, Reflect)] -#[reflect(Default, Component, Debug)] +#[reflect(Clone, Default, Component, Debug)] pub struct PlaybackSettings { /// The desired playback behavior. pub mode: PlaybackMode, @@ -74,7 +75,7 @@ impl PlaybackSettings { /// added again. pub const ONCE: PlaybackSettings = PlaybackSettings { mode: PlaybackMode::Once, - volume: Volume(1.0), + volume: Volume::Linear(1.0), speed: 1.0, paused: false, muted: false, @@ -142,7 +143,7 @@ impl PlaybackSettings { /// This must be accompanied by `Transform` and `GlobalTransform`. /// Only one entity with a `SpatialListener` should be present at any given time. #[derive(Component, Clone, Debug, Reflect)] -#[reflect(Default, Component, Debug)] +#[reflect(Clone, Default, Component, Debug)] pub struct SpatialListener { /// Left ear position relative to the `GlobalTransform`. pub left_ear_offset: Vec3, @@ -174,6 +175,7 @@ impl SpatialListener { /// /// Default is `Vec3::ONE`. #[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Clone, Default)] pub struct SpatialScale(pub Vec3); impl SpatialScale { @@ -202,7 +204,7 @@ impl Default for SpatialScale { /// /// Default is `Vec3::ONE`. #[derive(Resource, Default, Clone, Copy, Reflect)] -#[reflect(Resource, Default)] +#[reflect(Resource, Default, Clone)] pub struct DefaultSpatialScale(pub SpatialScale); /// A component for playing a sound. @@ -218,7 +220,7 @@ pub struct DefaultSpatialScale(pub SpatialScale); /// Playback can be configured using the [`PlaybackSettings`] component. Note that changes to the /// `PlaybackSettings` component will *not* affect already-playing audio. #[derive(Component, Reflect)] -#[reflect(Component)] +#[reflect(Component, Clone)] #[require(PlaybackSettings)] pub struct AudioPlayer(pub Handle) where diff --git a/crates/bevy_audio/src/audio_output.rs b/crates/bevy_audio/src/audio_output.rs index c098ac8382..1869fb4755 100644 --- a/crates/bevy_audio/src/audio_output.rs +++ b/crates/bevy_audio/src/audio_output.rs @@ -170,7 +170,7 @@ pub(crate) fn play_queued_audio_system( } sink.set_speed(settings.speed); - sink.set_volume(settings.volume.0 * global_volume.volume.0); + sink.set_volume(settings.volume * global_volume.volume); if settings.paused { sink.pause(); @@ -210,7 +210,7 @@ pub(crate) fn play_queued_audio_system( } sink.set_speed(settings.speed); - sink.set_volume(settings.volume.0 * global_volume.volume.0); + sink.set_volume(settings.volume * global_volume.volume); if settings.paused { sink.pause(); diff --git a/crates/bevy_audio/src/sinks.rs b/crates/bevy_audio/src/sinks.rs index d4be43261f..b0c77456e1 100644 --- a/crates/bevy_audio/src/sinks.rs +++ b/crates/bevy_audio/src/sinks.rs @@ -3,37 +3,26 @@ use bevy_math::Vec3; use bevy_transform::prelude::Transform; use rodio::{Sink, SpatialSink}; +use crate::Volume; + /// Common interactions with an audio sink. pub trait AudioSinkPlayback { - /// Gets the volume of the sound. - /// - /// The value `1.0` is the "normal" volume (unfiltered input). Any value - /// other than `1.0` will multiply each sample by this value. + /// Gets the volume of the sound as a [`Volume`]. 
/// /// If the sink is muted, this returns the managed volume rather than the - /// sink's actual volume. This allows you to use the volume as if the sink - /// were not muted, because a muted sink has a volume of 0. - fn volume(&self) -> f32; + /// sink's actual volume. This allows you to use the returned volume as if + /// the sink were not muted, because a muted sink has a physical volume of + /// 0. + fn volume(&self) -> Volume; - /// Changes the volume of the sound. - /// - /// The value `1.0` is the "normal" volume (unfiltered input). Any value other than `1.0` - /// will multiply each sample by this value. + /// Changes the volume of the sound to the given [`Volume`]. /// /// If the sink is muted, changing the volume won't unmute it, i.e. the - /// sink's volume will remain at `0.0`. However, the sink will remember the - /// volume change and it will be used when [`unmute`](Self::unmute) is - /// called. This allows you to control the volume even when the sink is - /// muted. - /// - /// # Note on Audio Volume - /// - /// An increase of 10 decibels (dB) roughly corresponds to the perceived volume doubling in intensity. - /// As this function scales not the volume but the amplitude, a conversion might be necessary. - /// For example, to halve the perceived volume you need to decrease the volume by 10 dB. - /// This corresponds to 20log(x) = -10dB, solving x = 10^(-10/20) = 0.316. - /// Multiply the current volume by 0.316 to halve the perceived volume. - fn set_volume(&mut self, volume: f32); + /// sink's volume will remain "off" / "muted". However, the sink will + /// remember the volume change and it will be used when + /// [`unmute`](Self::unmute) is called. This allows you to control the + /// volume even when the sink is muted. + fn set_volume(&mut self, volume: Volume); /// Gets the speed of the sound. /// @@ -132,7 +121,7 @@ pub struct AudioSink { /// If the sink is muted, this is `Some(volume)` where `volume` is the /// user's intended volume setting, even if the underlying sink's volume is /// 0. - pub(crate) managed_volume: Option, + pub(crate) managed_volume: Option, } impl AudioSink { @@ -146,15 +135,16 @@ impl AudioSink { } impl AudioSinkPlayback for AudioSink { - fn volume(&self) -> f32 { - self.managed_volume.unwrap_or_else(|| self.sink.volume()) + fn volume(&self) -> Volume { + self.managed_volume + .unwrap_or_else(|| Volume::Linear(self.sink.volume())) } - fn set_volume(&mut self, volume: f32) { + fn set_volume(&mut self, volume: Volume) { if self.is_muted() { self.managed_volume = Some(volume); } else { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } @@ -197,7 +187,7 @@ impl AudioSinkPlayback for AudioSink { fn unmute(&mut self) { if let Some(volume) = self.managed_volume.take() { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } } @@ -227,7 +217,7 @@ pub struct SpatialAudioSink { /// If the sink is muted, this is `Some(volume)` where `volume` is the /// user's intended volume setting, even if the underlying sink's volume is /// 0. 
- pub(crate) managed_volume: Option, + pub(crate) managed_volume: Option, } impl SpatialAudioSink { @@ -241,15 +231,16 @@ impl SpatialAudioSink { } impl AudioSinkPlayback for SpatialAudioSink { - fn volume(&self) -> f32 { - self.managed_volume.unwrap_or_else(|| self.sink.volume()) + fn volume(&self) -> Volume { + self.managed_volume + .unwrap_or_else(|| Volume::Linear(self.sink.volume())) } - fn set_volume(&mut self, volume: f32) { + fn set_volume(&mut self, volume: Volume) { if self.is_muted() { self.managed_volume = Some(volume); } else { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } @@ -292,7 +283,7 @@ impl AudioSinkPlayback for SpatialAudioSink { fn unmute(&mut self) { if let Some(volume) = self.managed_volume.take() { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } } @@ -326,11 +317,11 @@ mod tests { fn test_audio_sink_playback(mut audio_sink: T) { // Test volume - assert_eq!(audio_sink.volume(), 1.0); // default volume - audio_sink.set_volume(0.5); - assert_eq!(audio_sink.volume(), 0.5); - audio_sink.set_volume(1.0); - assert_eq!(audio_sink.volume(), 1.0); + assert_eq!(audio_sink.volume(), Volume::Linear(1.0)); // default volume + audio_sink.set_volume(Volume::Linear(0.5)); + assert_eq!(audio_sink.volume(), Volume::Linear(0.5)); + audio_sink.set_volume(Volume::Linear(1.0)); + assert_eq!(audio_sink.volume(), Volume::Linear(1.0)); // Test speed assert_eq!(audio_sink.speed(), 1.0); // default speed @@ -361,11 +352,11 @@ mod tests { assert!(!audio_sink.is_muted()); // Test volume with mute - audio_sink.set_volume(0.5); + audio_sink.set_volume(Volume::Linear(0.5)); audio_sink.mute(); - assert_eq!(audio_sink.volume(), 0.5); // returns managed volume even though sink volume is 0 + assert_eq!(audio_sink.volume(), Volume::Linear(0.5)); // returns managed volume even though sink volume is 0 audio_sink.unmute(); - assert_eq!(audio_sink.volume(), 0.5); // managed volume is restored + assert_eq!(audio_sink.volume(), Volume::Linear(0.5)); // managed volume is restored // Test toggle mute audio_sink.toggle_mute(); diff --git a/crates/bevy_audio/src/volume.rs b/crates/bevy_audio/src/volume.rs index f12fe0497f..b1378ae485 100644 --- a/crates/bevy_audio/src/volume.rs +++ b/crates/bevy_audio/src/volume.rs @@ -1,48 +1,504 @@ -use bevy_derive::Deref; use bevy_ecs::prelude::*; +use bevy_math::ops; use bevy_reflect::prelude::*; /// Use this [`Resource`] to control the global volume of all audio. /// -/// Note: changing this value will not affect already playing audio. -#[derive(Resource, Default, Clone, Copy, Reflect)] -#[reflect(Resource, Default)] +/// Note: Changing [`GlobalVolume`] does not affect already playing audio. +#[derive(Resource, Debug, Default, Clone, Copy, Reflect)] +#[reflect(Resource, Debug, Default, Clone)] pub struct GlobalVolume { /// The global volume of all audio. pub volume: Volume, } +impl From for GlobalVolume { + fn from(volume: Volume) -> Self { + Self { volume } + } +} + impl GlobalVolume { /// Create a new [`GlobalVolume`] with the given volume. - pub fn new(volume: f32) -> Self { - Self { - volume: Volume::new(volume), + pub fn new(volume: Volume) -> Self { + Self { volume } + } +} + +/// A [`Volume`] represents an audio source's volume level. +/// +/// To create a new [`Volume`] from a linear scale value, use +/// [`Volume::Linear`]. +/// +/// To create a new [`Volume`] from decibels, use [`Volume::Decibels`]. 
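As a quick orientation for reviewers, here is a small usage sketch of the reworked volume API, based only on the constructors, conversions, and operator impls added in this diff. The helper names (`duck`, `volume_basics`) are illustrative, and the imports assume `Volume`, `GlobalVolume`, and `AudioSinkPlayback` stay re-exported from `bevy_audio` as before:

```rust
use bevy_audio::{AudioSinkPlayback, GlobalVolume, Volume};

/// Illustrative helper: drop a sink's level by 10 dB (roughly half the
/// perceived loudness), whether or not the sink is currently muted.
fn duck(sink: &mut impl AudioSinkPlayback) {
    // `volume()` now returns a `Volume`; multiplication composes gains
    // (linear factors multiply, decibel offsets add).
    let ducked = sink.volume() * Volume::Decibels(-10.0);
    sink.set_volume(ducked);
}

fn volume_basics() {
    // Both values represent roughly half amplitude.
    let half_linear = Volume::Linear(0.5);
    let half_db = Volume::Decibels(-6.0206);
    assert!((half_linear.to_decibels() - half_db.to_decibels()).abs() < 0.01);
    assert!((half_db.to_linear() - 0.5).abs() < 0.01);

    // `GlobalVolume` is now constructed from a `Volume` rather than a bare
    // `f32`, and `Volume::SILENT` takes over from the old `Volume::ZERO`.
    let _global = GlobalVolume::new(Volume::Decibels(-12.0));
    assert_eq!(Volume::SILENT.to_linear(), 0.0);
}
```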
+#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Clone, Debug, PartialEq)] +pub enum Volume { + /// Create a new [`Volume`] from the given volume in linear scale. + /// + /// In a linear scale, the value `1.0` represents the "normal" volume, + /// meaning the audio is played at its original level. Values greater than + /// `1.0` increase the volume, while values between `0.0` and `1.0` decrease + /// the volume. A value of `0.0` effectively mutes the audio. + /// + /// # Examples + /// + /// ``` + /// # use bevy_audio::Volume; + /// # use bevy_math::ops; + /// # + /// # const EPSILON: f32 = 0.01; + /// + /// let volume = Volume::Linear(0.5); + /// assert_eq!(volume.to_linear(), 0.5); + /// assert!(ops::abs(volume.to_decibels() - -6.0206) < EPSILON); + /// + /// let volume = Volume::Linear(0.0); + /// assert_eq!(volume.to_linear(), 0.0); + /// assert_eq!(volume.to_decibels(), f32::NEG_INFINITY); + /// + /// let volume = Volume::Linear(1.0); + /// assert_eq!(volume.to_linear(), 1.0); + /// assert!(ops::abs(volume.to_decibels() - 0.0) < EPSILON); + /// ``` + Linear(f32), + /// Create a new [`Volume`] from the given volume in decibels. + /// + /// In a decibel scale, the value `0.0` represents the "normal" volume, + /// meaning the audio is played at its original level. Values greater than + /// `0.0` increase the volume, while values less than `0.0` decrease the + /// volume. A value of [`f32::NEG_INFINITY`] decibels effectively mutes the + /// audio. + /// + /// # Examples + /// + /// ``` + /// # use bevy_audio::Volume; + /// # use bevy_math::ops; + /// # + /// # const EPSILON: f32 = 0.01; + /// + /// let volume = Volume::Decibels(-5.998); + /// assert!(ops::abs(volume.to_linear() - 0.5) < EPSILON); + /// + /// let volume = Volume::Decibels(f32::NEG_INFINITY); + /// assert_eq!(volume.to_linear(), 0.0); + /// + /// let volume = Volume::Decibels(0.0); + /// assert_eq!(volume.to_linear(), 1.0); + /// + /// let volume = Volume::Decibels(20.0); + /// assert_eq!(volume.to_linear(), 10.0); + /// ``` + Decibels(f32), +} + +impl Default for Volume { + fn default() -> Self { + Self::Linear(1.0) + } +} + +impl PartialEq for Volume { + fn eq(&self, other: &Self) -> bool { + use Volume::{Decibels, Linear}; + + match (self, other) { + (Linear(a), Linear(b)) => a.abs() == b.abs(), + (Decibels(a), Decibels(b)) => a == b, + (a, b) => a.to_decibels() == b.to_decibels(), } } } -/// A volume level equivalent to a non-negative float. -#[derive(Clone, Copy, Deref, Debug, Reflect)] -#[reflect(Debug)] -pub struct Volume(pub(crate) f32); +impl PartialOrd for Volume { + fn partial_cmp(&self, other: &Self) -> Option { + use Volume::{Decibels, Linear}; -impl Default for Volume { - fn default() -> Self { - Self(1.0) + Some(match (self, other) { + (Linear(a), Linear(b)) => a.abs().total_cmp(&b.abs()), + (Decibels(a), Decibels(b)) => a.total_cmp(b), + (a, b) => a.to_decibels().total_cmp(&b.to_decibels()), + }) } } +#[inline] +fn decibels_to_linear(decibels: f32) -> f32 { + ops::powf(10.0f32, decibels / 20.0) +} + +#[inline] +fn linear_to_decibels(linear: f32) -> f32 { + 20.0 * ops::log10(linear.abs()) +} + impl Volume { - /// Create a new volume level. - pub fn new(volume: f32) -> Self { - debug_assert!(volume >= 0.0); - Self(f32::max(volume, 0.)) - } - /// Get the value of the volume level. - pub fn get(&self) -> f32 { - self.0 + /// Returns the volume in linear scale as a float. 
+ pub fn to_linear(&self) -> f32 { + match self { + Self::Linear(v) => v.abs(), + Self::Decibels(v) => decibels_to_linear(*v), + } } - /// Zero (silent) volume level - pub const ZERO: Self = Volume(0.0); + /// Returns the volume in decibels as a float. + /// + /// If the volume is silent / off / muted, i.e. it's underlying linear scale + /// is `0.0`, this method returns negative infinity. + pub fn to_decibels(&self) -> f32 { + match self { + Self::Linear(v) => linear_to_decibels(*v), + Self::Decibels(v) => *v, + } + } + + /// The silent volume. Also known as "off" or "muted". + pub const SILENT: Self = Volume::Linear(0.0); +} + +impl core::ops::Add for Volume { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a + b), + (Decibels(a), Decibels(b)) => Decibels(linear_to_decibels( + decibels_to_linear(a) + decibels_to_linear(b), + )), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. + (Linear(..), Decibels(db)) => self + Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self + Decibels(linear_to_decibels(l)), + } + } +} + +impl core::ops::AddAssign for Volume { + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs; + } +} + +impl core::ops::Sub for Volume { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a - b), + (Decibels(a), Decibels(b)) => Decibels(linear_to_decibels( + decibels_to_linear(a) - decibels_to_linear(b), + )), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. + (Linear(..), Decibels(db)) => self - Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self - Decibels(linear_to_decibels(l)), + } + } +} + +impl core::ops::SubAssign for Volume { + fn sub_assign(&mut self, rhs: Self) { + *self = *self - rhs; + } +} + +impl core::ops::Mul for Volume { + type Output = Self; + + fn mul(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a * b), + (Decibels(a), Decibels(b)) => Decibels(a + b), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. + (Linear(..), Decibels(db)) => self * Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self * Decibels(linear_to_decibels(l)), + } + } +} + +impl core::ops::MulAssign for Volume { + fn mul_assign(&mut self, rhs: Self) { + *self = *self * rhs; + } +} + +impl core::ops::Div for Volume { + type Output = Self; + + fn div(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a / b), + (Decibels(a), Decibels(b)) => Decibels(a - b), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. 
+ (Linear(..), Decibels(db)) => self / Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self / Decibels(linear_to_decibels(l)), + } + } +} + +impl core::ops::DivAssign for Volume { + fn div_assign(&mut self, rhs: Self) { + *self = *self / rhs; + } +} + +#[cfg(test)] +mod tests { + use super::Volume::{self, Decibels, Linear}; + + /// Based on [Wikipedia's Decibel article]. + /// + /// [Wikipedia's Decibel article]: https://web.archive.org/web/20230810185300/https://en.wikipedia.org/wiki/Decibel + const DECIBELS_LINEAR_TABLE: [(f32, f32); 27] = [ + (100., 100000.), + (90., 31623.), + (80., 10000.), + (70., 3162.), + (60., 1000.), + (50., 316.2), + (40., 100.), + (30., 31.62), + (20., 10.), + (10., 3.162), + (5.998, 1.995), + (3.003, 1.413), + (1.002, 1.122), + (0., 1.), + (-1.002, 0.891), + (-3.003, 0.708), + (-5.998, 0.501), + (-10., 0.3162), + (-20., 0.1), + (-30., 0.03162), + (-40., 0.01), + (-50., 0.003162), + (-60., 0.001), + (-70., 0.0003162), + (-80., 0.0001), + (-90., 0.00003162), + (-100., 0.00001), + ]; + + #[test] + fn volume_conversion() { + for (db, linear) in DECIBELS_LINEAR_TABLE { + for volume in [Linear(linear), Decibels(db), Linear(-linear)] { + let db_test = volume.to_decibels(); + let linear_test = volume.to_linear(); + + let db_delta = db_test - db; + let linear_relative_delta = (linear_test - linear) / linear; + + assert!( + db_delta.abs() < 1e-2, + "Expected ~{}dB, got {}dB (delta {})", + db, + db_test, + db_delta + ); + assert!( + linear_relative_delta.abs() < 1e-3, + "Expected ~{}, got {} (relative delta {})", + linear, + linear_test, + linear_relative_delta + ); + } + } + } + + #[test] + fn volume_conversion_special() { + assert!( + Decibels(f32::INFINITY).to_linear().is_infinite(), + "Infinite decibels is equivalent to infinite linear scale" + ); + assert!( + Linear(f32::INFINITY).to_decibels().is_infinite(), + "Infinite linear scale is equivalent to infinite decibels" + ); + + assert!( + Linear(f32::NEG_INFINITY).to_decibels().is_infinite(), + "Negative infinite linear scale is equivalent to infinite decibels" + ); + assert!( + Decibels(f32::NEG_INFINITY).to_linear().abs() == 0.0, + "Negative infinity decibels is equivalent to zero linear scale" + ); + + assert!( + Linear(0.0).to_decibels().is_infinite(), + "Zero linear scale is equivalent to negative infinity decibels" + ); + assert!( + Linear(-0.0).to_decibels().is_infinite(), + "Negative zero linear scale is equivalent to negative infinity decibels" + ); + + assert!( + Decibels(f32::NAN).to_linear().is_nan(), + "NaN decibels is equivalent to NaN linear scale" + ); + assert!( + Linear(f32::NAN).to_decibels().is_nan(), + "NaN linear scale is equivalent to NaN decibels" + ); + } + + fn assert_approx_eq(a: Volume, b: Volume) { + const EPSILON: f32 = 0.0001; + + match (a, b) { + (Decibels(a), Decibels(b)) | (Linear(a), Linear(b)) => assert!( + (a - b).abs() < EPSILON, + "Expected {:?} to be approximately equal to {:?}", + a, + b + ), + (a, b) => assert!( + (a.to_decibels() - b.to_decibels()).abs() < EPSILON, + "Expected {:?} to be approximately equal to {:?}", + a, + b + ), + } + } + + #[test] + fn volume_ops_add() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) + Linear(0.5), Linear(1.0)); + assert_approx_eq(Linear(0.5) + Linear(0.1), Linear(0.6)); + assert_approx_eq(Linear(0.5) + Linear(-0.5), Linear(0.0)); + + // Decibels to Decibels. 
+ assert_approx_eq(Decibels(0.0) + Decibels(0.0), Decibels(6.0206003)); + assert_approx_eq(Decibels(6.0) + Decibels(6.0), Decibels(12.020599)); + assert_approx_eq(Decibels(-6.0) + Decibels(-6.0), Decibels(0.020599423)); + + // {Linear, Decibels} favors the left hand side of the operation. + assert_approx_eq(Linear(0.5) + Decibels(0.0), Linear(1.5)); + assert_approx_eq(Decibels(0.0) + Linear(0.5), Decibels(3.521825)); + } + + #[test] + fn volume_ops_add_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume += Linear(0.5); + assert_approx_eq(volume, Linear(1.0)); + } + + #[test] + fn volume_ops_sub() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) - Linear(0.5), Linear(0.0)); + assert_approx_eq(Linear(0.5) - Linear(0.1), Linear(0.4)); + assert_approx_eq(Linear(0.5) - Linear(-0.5), Linear(1.0)); + + // Decibels to Decibels. + assert_eq!(Decibels(0.0) - Decibels(0.0), Decibels(f32::NEG_INFINITY)); + assert_approx_eq(Decibels(6.0) - Decibels(4.0), Decibels(-7.736506)); + assert_eq!(Decibels(-6.0) - Decibels(-6.0), Decibels(f32::NEG_INFINITY)); + } + + #[test] + fn volume_ops_sub_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume -= Linear(0.5); + assert_approx_eq(volume, Linear(0.0)); + } + + #[test] + fn volume_ops_mul() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) * Linear(0.5), Linear(0.25)); + assert_approx_eq(Linear(0.5) * Linear(0.1), Linear(0.05)); + assert_approx_eq(Linear(0.5) * Linear(-0.5), Linear(-0.25)); + + // Decibels to Decibels. + assert_approx_eq(Decibels(0.0) * Decibels(0.0), Decibels(0.0)); + assert_approx_eq(Decibels(6.0) * Decibels(6.0), Decibels(12.0)); + assert_approx_eq(Decibels(-6.0) * Decibels(-6.0), Decibels(-12.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + assert_approx_eq(Linear(0.5) * Decibels(0.0), Linear(0.5)); + assert_approx_eq(Decibels(0.0) * Linear(0.501), Decibels(-6.003246)); + } + + #[test] + fn volume_ops_mul_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume *= Linear(0.5); + assert_approx_eq(volume, Linear(0.25)); + + // Decibels to Decibels. + let mut volume = Decibels(6.0); + volume *= Decibels(6.0); + assert_approx_eq(volume, Decibels(12.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + let mut volume = Linear(0.5); + volume *= Decibels(0.0); + assert_approx_eq(volume, Linear(0.5)); + let mut volume = Decibels(0.0); + volume *= Linear(0.501); + assert_approx_eq(volume, Decibels(-6.003246)); + } + + #[test] + fn volume_ops_div() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) / Linear(0.5), Linear(1.0)); + assert_approx_eq(Linear(0.5) / Linear(0.1), Linear(5.0)); + assert_approx_eq(Linear(0.5) / Linear(-0.5), Linear(-1.0)); + + // Decibels to Decibels. + assert_approx_eq(Decibels(0.0) / Decibels(0.0), Decibels(0.0)); + assert_approx_eq(Decibels(6.0) / Decibels(6.0), Decibels(0.0)); + assert_approx_eq(Decibels(-6.0) / Decibels(-6.0), Decibels(0.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + assert_approx_eq(Linear(0.5) / Decibels(0.0), Linear(0.5)); + assert_approx_eq(Decibels(0.0) / Linear(0.501), Decibels(6.003246)); + } + + #[test] + fn volume_ops_div_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume /= Linear(0.5); + assert_approx_eq(volume, Linear(1.0)); + + // Decibels to Decibels. 
+ let mut volume = Decibels(6.0); + volume /= Decibels(6.0); + assert_approx_eq(volume, Decibels(0.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + let mut volume = Linear(0.5); + volume /= Decibels(0.0); + assert_approx_eq(volume, Linear(0.5)); + let mut volume = Decibels(0.0); + volume /= Linear(0.501); + assert_approx_eq(volume, Decibels(6.003246)); + } } diff --git a/crates/bevy_color/Cargo.toml b/crates/bevy_color/Cargo.toml index 7115963e7f..9b6d7d8cf6 100644 --- a/crates/bevy_color/Cargo.toml +++ b/crates/bevy_color/Cargo.toml @@ -1,21 +1,19 @@ [package] name = "bevy_color" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Types for representing and manipulating color values" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy", "color"] -rust-version = "1.83.0" +rust-version = "1.85.0" [dependencies] bevy_math = { path = "../bevy_math", version = "0.16.0-dev", default-features = false, features = [ "curve", ] } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -], optional = true } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } bytemuck = { version = "1", features = ["derive"] } serde = { version = "1.0", features = [ "derive", @@ -27,13 +25,20 @@ encase = { version = "0.10", default-features = false, optional = true } [features] default = ["std", "bevy_reflect", "encase"] -std = ["alloc", "bevy_math/std", "serde?/std"] +std = [ + "alloc", + "bevy_math/std", + "serde?/std", + "wgpu-types?/std", + "bevy_reflect?/std", +] alloc = ["bevy_math/alloc", "serde?/alloc"] serialize = ["serde", "bevy_math/serialize"] -bevy_reflect = ["dep:bevy_reflect", "std"] -wgpu-types = ["dep:wgpu-types", "std"] +bevy_reflect = ["dep:bevy_reflect"] +wgpu-types = ["dep:wgpu-types"] encase = ["dep:encase", "std"] libm = ["bevy_math/libm"] +critical-section = ["bevy_reflect?/critical-section"] [lints] workspace = true diff --git a/crates/bevy_color/crates/gen_tests/Cargo.toml b/crates/bevy_color/crates/gen_tests/Cargo.toml index 357e7aaba6..e0f5940d50 100644 --- a/crates/bevy_color/crates/gen_tests/Cargo.toml +++ b/crates/bevy_color/crates/gen_tests/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "gen_tests" version = "0.1.0" -edition = "2021" +edition = "2024" publish = false [workspace] diff --git a/crates/bevy_color/src/color.rs b/crates/bevy_color/src/color.rs index d4754d204e..832394449b 100644 --- a/crates/bevy_color/src/color.rs +++ b/crates/bevy_color/src/color.rs @@ -1,6 +1,6 @@ use crate::{ color_difference::EuclideanDistance, Alpha, Hsla, Hsva, Hue, Hwba, Laba, Lcha, LinearRgba, - Luminance, Mix, Oklaba, Oklcha, Srgba, StandardColor, Xyza, + Luminance, Mix, Oklaba, Oklcha, Saturation, Srgba, StandardColor, Xyza, }; #[cfg(feature = "bevy_reflect")] use bevy_reflect::prelude::*; @@ -42,7 +42,11 @@ use derive_more::derive::From; /// To avoid the cost of repeated conversion, and ensure consistent results where that is desired, /// first convert this [`Color`] into your desired color space. 
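The bevy_color hunks that follow introduce a `Saturation` trait alongside the existing `Hue` and `Luminance` operations. A minimal usage sketch, assuming the trait is re-exported from the crate root like its siblings; the helper names are illustrative:

```rust
use bevy_color::{Color, Hsla, Saturation};

/// Illustrative helper: halve a color's saturation. Color spaces without a
/// native saturation channel are routed through `Hsla`, per the trait docs.
fn desaturate(color: Color) -> Color {
    color.with_saturation(color.saturation() * 0.5)
}

/// Illustrative helper: fully desaturate an `Hsla` value in place.
fn gray_out(hsla: &mut Hsla) {
    hsla.set_saturation(0.0);
}
```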
#[derive(Debug, Clone, Copy, PartialEq, From)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -810,6 +814,44 @@ impl Hue for Color { } } +impl Saturation for Color { + fn with_saturation(&self, saturation: f32) -> Self { + let mut new = *self; + + match &mut new { + Color::Srgba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::LinearRgba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Hsla(x) => x.with_saturation(saturation).into(), + Color::Hsva(x) => x.with_saturation(saturation).into(), + Color::Hwba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Laba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Lcha(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Oklaba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Oklcha(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Xyza(x) => Hsla::from(*x).with_saturation(saturation).into(), + } + } + + fn saturation(&self) -> f32 { + match self { + Color::Srgba(x) => Hsla::from(*x).saturation(), + Color::LinearRgba(x) => Hsla::from(*x).saturation(), + Color::Hsla(x) => x.saturation(), + Color::Hsva(x) => x.saturation(), + Color::Hwba(x) => Hsla::from(*x).saturation(), + Color::Laba(x) => Hsla::from(*x).saturation(), + Color::Lcha(x) => Hsla::from(*x).saturation(), + Color::Oklaba(x) => Hsla::from(*x).saturation(), + Color::Oklcha(x) => Hsla::from(*x).saturation(), + Color::Xyza(x) => Hsla::from(*x).saturation(), + } + } + + fn set_saturation(&mut self, saturation: f32) { + *self = self.with_saturation(saturation); + } +} + impl Mix for Color { fn mix(&self, other: &Self, factor: f32) -> Self { let mut new = *self; diff --git a/crates/bevy_color/src/color_ops.rs b/crates/bevy_color/src/color_ops.rs index 235c8c8bf3..776ee906f9 100644 --- a/crates/bevy_color/src/color_ops.rs +++ b/crates/bevy_color/src/color_ops.rs @@ -60,7 +60,7 @@ pub trait Alpha: Sized { /// Return a new version of this color with the given alpha value. fn with_alpha(&self, alpha: f32) -> Self; - /// Return a the alpha component of this color. + /// Return the alpha component of this color. fn alpha(&self) -> f32; /// Sets the alpha component of this color. @@ -95,6 +95,21 @@ pub trait Hue: Sized { } } +/// Trait for manipulating the saturation of a color. +/// +/// When working with color spaces that do not have native saturation components +/// the operations are performed in [`crate::Hsla`]. +pub trait Saturation: Sized { + /// Return a new version of this color with the saturation channel set to the given value. + fn with_saturation(&self, saturation: f32) -> Self; + + /// Return the saturation of this color [0.0, 1.0]. + fn saturation(&self) -> f32; + + /// Sets the saturation of this color. 
+ fn set_saturation(&mut self, saturation: f32); +} + /// Trait with methods for converting colors to non-color types pub trait ColorToComponents { /// Convert to an f32 array diff --git a/crates/bevy_color/src/hsla.rs b/crates/bevy_color/src/hsla.rs index 6b26fbff8d..b29fce72ac 100644 --- a/crates/bevy_color/src/hsla.rs +++ b/crates/bevy_color/src/hsla.rs @@ -1,6 +1,6 @@ use crate::{ - Alpha, ColorToComponents, Gray, Hsva, Hue, Hwba, Lcha, LinearRgba, Luminance, Mix, Srgba, - StandardColor, Xyza, + Alpha, ColorToComponents, Gray, Hsva, Hue, Hwba, Lcha, LinearRgba, Luminance, Mix, Saturation, + Srgba, StandardColor, Xyza, }; use bevy_math::{Vec3, Vec4}; #[cfg(feature = "bevy_reflect")] @@ -13,7 +13,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -159,6 +163,26 @@ impl Hue for Hsla { } } +impl Saturation for Hsla { + #[inline] + fn with_saturation(&self, saturation: f32) -> Self { + Self { + saturation, + ..*self + } + } + + #[inline] + fn saturation(&self) -> f32 { + self.saturation + } + + #[inline] + fn set_saturation(&mut self, saturation: f32) { + self.saturation = saturation; + } +} + impl Luminance for Hsla { #[inline] fn with_luminance(&self, lightness: f32) -> Self { diff --git a/crates/bevy_color/src/hsva.rs b/crates/bevy_color/src/hsva.rs index e708ccf67e..9e94eb24f6 100644 --- a/crates/bevy_color/src/hsva.rs +++ b/crates/bevy_color/src/hsva.rs @@ -1,5 +1,6 @@ use crate::{ - Alpha, ColorToComponents, Gray, Hue, Hwba, Lcha, LinearRgba, Mix, Srgba, StandardColor, Xyza, + Alpha, ColorToComponents, Gray, Hue, Hwba, Lcha, LinearRgba, Mix, Saturation, Srgba, + StandardColor, Xyza, }; use bevy_math::{Vec3, Vec4}; #[cfg(feature = "bevy_reflect")] @@ -12,7 +13,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -129,6 +134,26 @@ impl Hue for Hsva { } } +impl Saturation for Hsva { + #[inline] + fn with_saturation(&self, saturation: f32) -> Self { + Self { + saturation, + ..*self + } + } + + #[inline] + fn saturation(&self) -> f32 { + self.saturation + } + + #[inline] + fn set_saturation(&mut self, saturation: f32) { + self.saturation = saturation; + } +} + impl From for Hwba { fn from( Hsva { diff --git a/crates/bevy_color/src/hwba.rs b/crates/bevy_color/src/hwba.rs index 459b5d82dc..36d328658d 100644 --- a/crates/bevy_color/src/hwba.rs +++ b/crates/bevy_color/src/hwba.rs @@ -16,7 +16,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", 
derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/laba.rs b/crates/bevy_color/src/laba.rs index 39ac37f8ff..010b3df249 100644 --- a/crates/bevy_color/src/laba.rs +++ b/crates/bevy_color/src/laba.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/lcha.rs b/crates/bevy_color/src/lcha.rs index f1437d3496..e5f5ecab32 100644 --- a/crates/bevy_color/src/lcha.rs +++ b/crates/bevy_color/src/lcha.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/linear_rgba.rs b/crates/bevy_color/src/linear_rgba.rs index d1781bfc41..d00d765aac 100644 --- a/crates/bevy_color/src/linear_rgba.rs +++ b/crates/bevy_color/src/linear_rgba.rs @@ -13,7 +13,11 @@ use bytemuck::{Pod, Zeroable}; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq, Pod, Zeroable)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/oklaba.rs b/crates/bevy_color/src/oklaba.rs index 1281109d02..0203ca6a69 100644 --- a/crates/bevy_color/src/oklaba.rs +++ b/crates/bevy_color/src/oklaba.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/oklcha.rs b/crates/bevy_color/src/oklcha.rs index 70c150ed0f..91ffe422c7 100644 --- a/crates/bevy_color/src/oklcha.rs +++ b/crates/bevy_color/src/oklcha.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git 
a/crates/bevy_color/src/srgba.rs b/crates/bevy_color/src/srgba.rs index 49e40792b7..ead2adf039 100644 --- a/crates/bevy_color/src/srgba.rs +++ b/crates/bevy_color/src/srgba.rs @@ -15,7 +15,11 @@ use thiserror::Error; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/xyza.rs b/crates/bevy_color/src/xyza.rs index a9fb422bef..c48a868416 100644 --- a/crates/bevy_color/src/xyza.rs +++ b/crates/bevy_color/src/xyza.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_core_pipeline/Cargo.toml b/crates/bevy_core_pipeline/Cargo.toml index 3994040369..304c007104 100644 --- a/crates/bevy_core_pipeline/Cargo.toml +++ b/crates/bevy_core_pipeline/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_core_pipeline" version = "0.16.0-dev" -edition = "2021" +edition = "2024" authors = [ "Bevy Contributors ", "Carter Anderson ", @@ -13,12 +13,10 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -dds = ["bevy_render/dds", "bevy_image/dds"] trace = [] webgl = [] webgpu = [] tonemapping_luts = ["bevy_render/ktx2", "bevy_image/ktx2", "bevy_image/zstd"] -smaa_luts = ["bevy_render/ktx2", "bevy_image/ktx2", "bevy_image/zstd"] [dependencies] # bevy @@ -35,7 +33,7 @@ bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", "serialize", ] } diff --git a/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs b/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs index cc16cd4630..38d55bc9de 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs @@ -1,5 +1,5 @@ use bevy_ecs::prelude::*; -use bevy_platform_support::collections::{hash_map::Entry, HashMap}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_render::{ render_resource::{StorageBuffer, UniformBuffer}, renderer::{RenderDevice, RenderQueue}, diff --git a/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs b/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs index 7a89de331c..e2ffe1a6c4 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs @@ -18,7 +18,7 @@ const LUT_SIZE: usize = 256; /// This curve is used to map the average log 
luminance of a scene to an /// exposure compensation value, to allow for fine control over the final exposure. #[derive(Asset, Reflect, Debug, Clone)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct AutoExposureCompensationCurve { /// The minimum log luminance value in the curve. (the x-axis) min_log_lum: f32, diff --git a/crates/bevy_core_pipeline/src/auto_exposure/settings.rs b/crates/bevy_core_pipeline/src/auto_exposure/settings.rs index b5039030ac..cf6fdd4e24 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/settings.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/settings.rs @@ -24,7 +24,7 @@ use bevy_utils::default; /// /// **Auto Exposure requires compute shaders and is not compatible with WebGL2.** #[derive(Component, Clone, Reflect, ExtractComponent)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AutoExposure { /// The range of exposure values for the histogram. /// diff --git a/crates/bevy_core_pipeline/src/bloom/mod.rs b/crates/bevy_core_pipeline/src/bloom/mod.rs index 938f2f243b..8717b9096e 100644 --- a/crates/bevy_core_pipeline/src/bloom/mod.rs +++ b/crates/bevy_core_pipeline/src/bloom/mod.rs @@ -2,7 +2,6 @@ mod downsampling_pipeline; mod settings; mod upsampling_pipeline; -use bevy_color::{Gray, LinearRgba}; pub use settings::{Bloom, BloomCompositeMode, BloomPrefilter}; use crate::{ @@ -11,6 +10,7 @@ use crate::{ }; use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_color::{Gray, LinearRgba}; use bevy_ecs::{prelude::*, query::QueryItem}; use bevy_math::{ops, UVec2}; use bevy_render::{ @@ -30,6 +30,8 @@ use downsampling_pipeline::{ prepare_downsampling_pipeline, BloomDownsamplingPipeline, BloomDownsamplingPipelineIds, BloomUniforms, }; +#[cfg(feature = "trace")] +use tracing::info_span; use upsampling_pipeline::{ prepare_upsampling_pipeline, BloomUpsamplingPipeline, UpsamplingPipelineIds, }; @@ -108,10 +110,10 @@ impl ViewNode for BloomNode { // Atypically for a post-processing effect, we do not need to // use a secondary texture normally provided by view_target.post_process_write(), // instead we write into our own bloom texture and then directly back onto main. 
- fn run( + fn run<'w>( &self, _graph: &mut RenderGraphContext, - render_context: &mut RenderContext, + render_context: &mut RenderContext<'w>, ( camera, view_target, @@ -121,8 +123,8 @@ impl ViewNode for BloomNode { bloom_settings, upsampling_pipeline_ids, downsampling_pipeline_ids, - ): QueryItem, - world: &World, + ): QueryItem<'w, Self::ViewQuery>, + world: &'w World, ) -> Result<(), NodeRunError> { if bloom_settings.intensity == 0.0 { return Ok(()); @@ -149,132 +151,152 @@ impl ViewNode for BloomNode { return Ok(()); }; - render_context.command_encoder().push_debug_group("bloom"); - + let view_texture = view_target.main_texture_view(); + let view_texture_unsampled = view_target.get_unsampled_color_attachment(); let diagnostics = render_context.diagnostic_recorder(); - let time_span = diagnostics.time_span(render_context.command_encoder(), "bloom"); - // First downsample pass - { - let downsampling_first_bind_group = render_context.render_device().create_bind_group( - "bloom_downsampling_first_bind_group", - &downsampling_pipeline_res.bind_group_layout, - &BindGroupEntries::sequential(( - // Read from main texture directly - view_target.main_texture_view(), - &bind_groups.sampler, - uniforms.clone(), - )), - ); + render_context.add_command_buffer_generation_task(move |render_device| { + #[cfg(feature = "trace")] + let _bloom_span = info_span!("bloom").entered(); - let view = &bloom_texture.view(0); - let mut downsampling_first_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("bloom_downsampling_first_pass"), - color_attachments: &[Some(RenderPassColorAttachment { - view, - resolve_target: None, - ops: Operations::default(), - })], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, + let mut command_encoder = + render_device.create_command_encoder(&CommandEncoderDescriptor { + label: Some("bloom_command_encoder"), }); - downsampling_first_pass.set_render_pipeline(downsampling_first_pipeline); - downsampling_first_pass.set_bind_group( - 0, - &downsampling_first_bind_group, - &[uniform_index.index()], - ); - downsampling_first_pass.draw(0..3, 0..1); - } + command_encoder.push_debug_group("bloom"); + let time_span = diagnostics.time_span(&mut command_encoder, "bloom"); - // Other downsample passes - for mip in 1..bloom_texture.mip_count { - let view = &bloom_texture.view(mip); - let mut downsampling_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("bloom_downsampling_pass"), - color_attachments: &[Some(RenderPassColorAttachment { - view, - resolve_target: None, - ops: Operations::default(), - })], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }); - downsampling_pass.set_render_pipeline(downsampling_pipeline); - downsampling_pass.set_bind_group( - 0, - &bind_groups.downsampling_bind_groups[mip as usize - 1], - &[uniform_index.index()], - ); - downsampling_pass.draw(0..3, 0..1); - } + // First downsample pass + { + let downsampling_first_bind_group = render_device.create_bind_group( + "bloom_downsampling_first_bind_group", + &downsampling_pipeline_res.bind_group_layout, + &BindGroupEntries::sequential(( + // Read from main texture directly + view_texture, + &bind_groups.sampler, + uniforms.clone(), + )), + ); - // Upsample passes except the final one - for mip in (1..bloom_texture.mip_count).rev() { - let view = &bloom_texture.view(mip - 1); - let mut upsampling_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - 
label: Some("bloom_upsampling_pass"), - color_attachments: &[Some(RenderPassColorAttachment { - view, - resolve_target: None, - ops: Operations { - load: LoadOp::Load, - store: StoreOp::Store, - }, - })], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }); - upsampling_pass.set_render_pipeline(upsampling_pipeline); - upsampling_pass.set_bind_group( - 0, - &bind_groups.upsampling_bind_groups[(bloom_texture.mip_count - mip - 1) as usize], - &[uniform_index.index()], - ); - let blend = compute_blend_factor( - bloom_settings, - mip as f32, - (bloom_texture.mip_count - 1) as f32, - ); - upsampling_pass.set_blend_constant(LinearRgba::gray(blend)); - upsampling_pass.draw(0..3, 0..1); - } - - // Final upsample pass - // This is very similar to the above upsampling passes with the only difference - // being the pipeline (which itself is barely different) and the color attachment - { - let mut upsampling_final_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("bloom_upsampling_final_pass"), - color_attachments: &[Some(view_target.get_unsampled_color_attachment())], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }); - upsampling_final_pass.set_render_pipeline(upsampling_final_pipeline); - upsampling_final_pass.set_bind_group( - 0, - &bind_groups.upsampling_bind_groups[(bloom_texture.mip_count - 1) as usize], - &[uniform_index.index()], - ); - if let Some(viewport) = camera.viewport.as_ref() { - upsampling_final_pass.set_camera_viewport(viewport); + let view = &bloom_texture.view(0); + let mut downsampling_first_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_downsampling_first_pass"), + color_attachments: &[Some(RenderPassColorAttachment { + view, + resolve_target: None, + ops: Operations::default(), + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + downsampling_first_pass.set_pipeline(downsampling_first_pipeline); + downsampling_first_pass.set_bind_group( + 0, + &downsampling_first_bind_group, + &[uniform_index.index()], + ); + downsampling_first_pass.draw(0..3, 0..1); } - let blend = - compute_blend_factor(bloom_settings, 0.0, (bloom_texture.mip_count - 1) as f32); - upsampling_final_pass.set_blend_constant(LinearRgba::gray(blend)); - upsampling_final_pass.draw(0..3, 0..1); - } - time_span.end(render_context.command_encoder()); - render_context.command_encoder().pop_debug_group(); + // Other downsample passes + for mip in 1..bloom_texture.mip_count { + let view = &bloom_texture.view(mip); + let mut downsampling_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_downsampling_pass"), + color_attachments: &[Some(RenderPassColorAttachment { + view, + resolve_target: None, + ops: Operations::default(), + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + downsampling_pass.set_pipeline(downsampling_pipeline); + downsampling_pass.set_bind_group( + 0, + &bind_groups.downsampling_bind_groups[mip as usize - 1], + &[uniform_index.index()], + ); + downsampling_pass.draw(0..3, 0..1); + } + + // Upsample passes except the final one + for mip in (1..bloom_texture.mip_count).rev() { + let view = &bloom_texture.view(mip - 1); + let mut upsampling_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_upsampling_pass"), + color_attachments: &[Some(RenderPassColorAttachment { + view, 
+ resolve_target: None, + ops: Operations { + load: LoadOp::Load, + store: StoreOp::Store, + }, + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + upsampling_pass.set_pipeline(upsampling_pipeline); + upsampling_pass.set_bind_group( + 0, + &bind_groups.upsampling_bind_groups + [(bloom_texture.mip_count - mip - 1) as usize], + &[uniform_index.index()], + ); + let blend = compute_blend_factor( + bloom_settings, + mip as f32, + (bloom_texture.mip_count - 1) as f32, + ); + upsampling_pass.set_blend_constant(LinearRgba::gray(blend).into()); + upsampling_pass.draw(0..3, 0..1); + } + + // Final upsample pass + // This is very similar to the above upsampling passes with the only difference + // being the pipeline (which itself is barely different) and the color attachment + { + let mut upsampling_final_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_upsampling_final_pass"), + color_attachments: &[Some(view_texture_unsampled)], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + upsampling_final_pass.set_pipeline(upsampling_final_pipeline); + upsampling_final_pass.set_bind_group( + 0, + &bind_groups.upsampling_bind_groups[(bloom_texture.mip_count - 1) as usize], + &[uniform_index.index()], + ); + if let Some(viewport) = camera.viewport.as_ref() { + upsampling_final_pass.set_viewport( + viewport.physical_position.x as f32, + viewport.physical_position.y as f32, + viewport.physical_size.x as f32, + viewport.physical_size.y as f32, + viewport.depth.start, + viewport.depth.end, + ); + } + let blend = + compute_blend_factor(bloom_settings, 0.0, (bloom_texture.mip_count - 1) as f32); + upsampling_final_pass.set_blend_constant(LinearRgba::gray(blend).into()); + upsampling_final_pass.draw(0..3, 0..1); + } + + time_span.end(&mut command_encoder); + command_encoder.pop_debug_group(); + command_encoder.finish() + }); Ok(()) } diff --git a/crates/bevy_core_pipeline/src/bloom/settings.rs b/crates/bevy_core_pipeline/src/bloom/settings.rs index 2e22875a35..f6ee8dbd1e 100644 --- a/crates/bevy_core_pipeline/src/bloom/settings.rs +++ b/crates/bevy_core_pipeline/src/bloom/settings.rs @@ -25,7 +25,7 @@ use bevy_render::{extract_component::ExtractComponent, prelude::Camera}; /// See for a visualization of the parametric curve /// used in Bevy as well as a visualization of the curve's respective scattering profile. #[derive(Component, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct Bloom { /// Controls the baseline of how much the image is scattered (default: 0.15). /// @@ -193,6 +193,7 @@ impl Default for Bloom { /// * Changing these settings makes it easy to make the final result look worse /// * Non-default prefilter settings should be used in conjunction with [`BloomCompositeMode::Additive`] #[derive(Default, Clone, Reflect)] +#[reflect(Clone, Default)] pub struct BloomPrefilter { /// Baseline of the quadratic threshold curve (default: 0.0). 
/// @@ -209,6 +210,7 @@ pub struct BloomPrefilter { } #[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, Copy)] +#[reflect(Clone, Hash, PartialEq)] pub enum BloomCompositeMode { EnergyConserving, Additive, diff --git a/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs b/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs index 9780ddd31f..d46174192b 100644 --- a/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs +++ b/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs @@ -14,13 +14,13 @@ use bevy_transform::prelude::{GlobalTransform, Transform}; /// A 2D camera component. Enables the 2D render graph for a [`Camera`]. #[derive(Component, Default, Reflect, Clone, ExtractComponent)] #[extract_component_filter(With)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require( Camera, DebandDither, - CameraRenderGraph(|| CameraRenderGraph::new(Core2d)), - Projection(|| Projection::Orthographic(OrthographicProjection::default_2d())), - Frustum(|| OrthographicProjection::default_2d().compute_frustum(&GlobalTransform::from(Transform::default()))), - Tonemapping(|| Tonemapping::None), + CameraRenderGraph::new(Core2d), + Projection::Orthographic(OrthographicProjection::default_2d()), + Frustum = OrthographicProjection::default_2d().compute_frustum(&GlobalTransform::from(Transform::default())), + Tonemapping::None, )] pub struct Camera2d; diff --git a/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs b/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs index dae9217d3a..494d4d0f89 100644 --- a/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs +++ b/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs @@ -44,6 +44,15 @@ impl ViewNode for MainTransparentPass2dNode { let diagnostics = render_context.diagnostic_recorder(); + let color_attachments = [Some(target.get_color_attachment())]; + // NOTE: For the transparent pass we load the depth buffer. There should be no + // need to write to it, but store is set to `true` as a workaround for issue #3776, + // https://github.com/bevyengine/bevy/issues/3776 + // so that wgpu does not clear the depth buffer. + // As the opaque and alpha mask passes run first, opaque meshes can occlude + // transparent ones. + let depth_stencil_attachment = Some(depth.get_attachment(StoreOp::Store)); + render_context.add_command_buffer_generation_task(move |render_device| { // Command encoder setup let mut command_encoder = @@ -58,14 +67,8 @@ impl ViewNode for MainTransparentPass2dNode { let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { label: Some("main_transparent_pass_2d"), - color_attachments: &[Some(target.get_color_attachment())], - // NOTE: For the transparent pass we load the depth buffer. There should be no - // need to write to it, but store is set to `true` as a workaround for issue #3776, - // https://github.com/bevyengine/bevy/issues/3776 - // so that wgpu does not clear the depth buffer. - // As the opaque and alpha mask passes run first, opaque meshes can occlude - // transparent ones. 
- depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)), + color_attachments: &color_attachments, + depth_stencil_attachment, timestamp_writes: None, occlusion_query_set: None, }); diff --git a/crates/bevy_core_pipeline/src/core_2d/mod.rs b/crates/bevy_core_pipeline/src/core_2d/mod.rs index f327757277..0a8ed17f8e 100644 --- a/crates/bevy_core_pipeline/src/core_2d/mod.rs +++ b/crates/bevy_core_pipeline/src/core_2d/mod.rs @@ -19,6 +19,7 @@ pub mod graph { MainOpaquePass, MainTransparentPass, EndMainPass, + Wireframe, Bloom, PostProcessing, Tonemapping, @@ -33,7 +34,7 @@ pub mod graph { use core::ops::Range; use bevy_asset::UntypedAssetId; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ batching::gpu_preprocessing::GpuPreprocessingMode, render_phase::PhaseItemBatchSetKey, @@ -349,6 +350,7 @@ pub struct Transparent2d { pub pipeline: CachedRenderPipelineId, pub draw_function: DrawFunctionId, pub batch_range: Range, + pub extracted_index: usize, pub extra_index: PhaseItemExtraIndex, /// Whether the mesh in question is indexed (uses an index buffer in /// addition to its vertex buffer). diff --git a/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs b/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs index 418d3b8d48..9bcb2b4f80 100644 --- a/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs +++ b/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs @@ -18,11 +18,11 @@ use serde::{Deserialize, Serialize}; /// This means "forward" is -Z. #[derive(Component, Reflect, Clone, ExtractComponent)] #[extract_component_filter(With)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require( Camera, - DebandDither(|| DebandDither::Enabled), - CameraRenderGraph(|| CameraRenderGraph::new(Core3d)), + DebandDither::Enabled, + CameraRenderGraph::new(Core3d), Projection, Tonemapping, ColorGrading, @@ -56,7 +56,7 @@ pub struct Camera3d { /// /// Higher qualities are more GPU-intensive. /// - /// **Note:** You can get better-looking results at any quality level by enabling TAA. See: [`TemporalAntiAliasPlugin`](crate::experimental::taa::TemporalAntiAliasPlugin). + /// **Note:** You can get better-looking results at any quality level by enabling TAA. See: `TemporalAntiAliasPlugin` pub screen_space_specular_transmission_quality: ScreenSpaceTransmissionQuality, } @@ -72,7 +72,7 @@ impl Default for Camera3d { } #[derive(Clone, Copy, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone)] pub struct Camera3dDepthTextureUsage(pub u32); impl From for Camera3dDepthTextureUsage { @@ -88,7 +88,7 @@ impl From for TextureUsages { /// The depth clear operation to perform for the main 3d pass. #[derive(Reflect, Serialize, Deserialize, Clone, Debug)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, Default)] pub enum Camera3dDepthLoadOp { /// Clear with a specified value. /// Note that 0.0 is the far plane due to bevy's use of reverse-z projections. @@ -117,9 +117,9 @@ impl From for LoadOp { /// /// Higher qualities are more GPU-intensive. /// -/// **Note:** You can get better-looking results at any quality level by enabling TAA. See: [`TemporalAntiAliasPlugin`](crate::experimental::taa::TemporalAntiAliasPlugin). +/// **Note:** You can get better-looking results at any quality level by enabling TAA. 
See: `TemporalAntiAliasPlugin` #[derive(Resource, Default, Clone, Copy, Reflect, PartialEq, PartialOrd, Debug)] -#[reflect(Resource, Default, Debug, PartialEq)] +#[reflect(Resource, Default, Clone, Debug, PartialEq)] pub enum ScreenSpaceTransmissionQuality { /// Best performance at the cost of quality. Suitable for lower end GPUs. (e.g. Mobile) /// diff --git a/crates/bevy_core_pipeline/src/core_3d/mod.rs b/crates/bevy_core_pipeline/src/core_3d/mod.rs index 7572d80b39..b9f6955499 100644 --- a/crates/bevy_core_pipeline/src/core_3d/mod.rs +++ b/crates/bevy_core_pipeline/src/core_3d/mod.rs @@ -19,7 +19,8 @@ pub mod graph { EarlyPrepass, EarlyDownsampleDepth, LatePrepass, - DeferredPrepass, + EarlyDeferredPrepass, + LateDeferredPrepass, CopyDeferredLightingId, EndPrepasses, StartMainPass, @@ -27,6 +28,7 @@ pub mod graph { MainTransmissivePass, MainTransparentPass, EndMainPass, + Wireframe, LateDownsampleDepth, Taa, MotionBlur, @@ -85,7 +87,7 @@ use bevy_color::LinearRgba; use bevy_ecs::prelude::*; use bevy_image::BevyDefault; use bevy_math::FloatOrd; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ camera::{Camera, ExtractedCamera}, extract_component::ExtractComponentPlugin, @@ -112,7 +114,8 @@ use tracing::warn; use crate::{ core_3d::main_transmissive_pass_3d_node::MainTransmissivePass3dNode, deferred::{ - copy_lighting_id::CopyDeferredLightingIdNode, node::DeferredGBufferPrepassNode, + copy_lighting_id::CopyDeferredLightingIdNode, + node::{EarlyDeferredGBufferPrepassNode, LateDeferredGBufferPrepassNode}, AlphaMask3dDeferred, Opaque3dDeferred, DEFERRED_LIGHTING_PASS_ID_FORMAT, DEFERRED_PREPASS_FORMAT, }, @@ -179,9 +182,13 @@ impl Plugin for Core3dPlugin { .add_render_sub_graph(Core3d) .add_render_graph_node::>(Core3d, Node3d::EarlyPrepass) .add_render_graph_node::>(Core3d, Node3d::LatePrepass) - .add_render_graph_node::>( + .add_render_graph_node::>( Core3d, - Node3d::DeferredPrepass, + Node3d::EarlyDeferredPrepass, + ) + .add_render_graph_node::>( + Core3d, + Node3d::LateDeferredPrepass, ) .add_render_graph_node::>( Core3d, @@ -210,8 +217,9 @@ impl Plugin for Core3dPlugin { Core3d, ( Node3d::EarlyPrepass, + Node3d::EarlyDeferredPrepass, Node3d::LatePrepass, - Node3d::DeferredPrepass, + Node3d::LateDeferredPrepass, Node3d::CopyDeferredLightingId, Node3d::EndPrepasses, Node3d::StartMainPass, @@ -718,13 +726,35 @@ pub fn extract_camera_prepass_phase( } live_entities.insert(retained_view_entity); - commands + // Add or remove prepasses as appropriate. 
+ + let mut camera_commands = commands .get_entity(entity) - .expect("Camera entity wasn't synced.") - .insert_if(DepthPrepass, || depth_prepass) - .insert_if(NormalPrepass, || normal_prepass) - .insert_if(MotionVectorPrepass, || motion_vector_prepass) - .insert_if(DeferredPrepass, || deferred_prepass); + .expect("Camera entity wasn't synced."); + + if depth_prepass { + camera_commands.insert(DepthPrepass); + } else { + camera_commands.remove::(); + } + + if normal_prepass { + camera_commands.insert(NormalPrepass); + } else { + camera_commands.remove::(); + } + + if motion_vector_prepass { + camera_commands.insert(MotionVectorPrepass); + } else { + camera_commands.remove::(); + } + + if deferred_prepass { + camera_commands.insert(DeferredPrepass); + } else { + camera_commands.remove::(); + } } opaque_3d_prepass_phases.retain(|view_entity, _| live_entities.contains(view_entity)); @@ -921,7 +951,6 @@ fn configure_occlusion_culling_view_targets( With, Without, With, - Without, ), >, ) { @@ -986,6 +1015,7 @@ pub fn prepare_prepass_textures( && !opaque_3d_deferred_phases.contains_key(&view.retained_view_entity) && !alpha_mask_3d_deferred_phases.contains_key(&view.retained_view_entity) { + commands.entity(entity).remove::(); continue; }; diff --git a/crates/bevy_core_pipeline/src/deferred/node.rs b/crates/bevy_core_pipeline/src/deferred/node.rs index 5485e8fc00..ffac1eec6d 100644 --- a/crates/bevy_core_pipeline/src/deferred/node.rs +++ b/crates/bevy_core_pipeline/src/deferred/node.rs @@ -1,7 +1,8 @@ use bevy_ecs::{prelude::*, query::QueryItem}; +use bevy_render::experimental::occlusion_culling::OcclusionCulling; use bevy_render::render_graph::ViewNode; -use bevy_render::view::ExtractedView; +use bevy_render::view::{ExtractedView, NoIndirectDrawing}; use bevy_render::{ camera::ExtractedCamera, render_graph::{NodeRunError, RenderGraphContext}, @@ -18,76 +19,151 @@ use crate::prepass::ViewPrepassTextures; use super::{AlphaMask3dDeferred, Opaque3dDeferred}; -/// Render node used by the prepass. +/// The phase of the deferred prepass that draws meshes that were visible last +/// frame. /// -/// By default, inserted before the main pass in the render graph. +/// If occlusion culling isn't in use, this prepass simply draws all meshes. +/// +/// Like all prepass nodes, this is inserted before the main pass in the render +/// graph. #[derive(Default)] -pub struct DeferredGBufferPrepassNode; +pub struct EarlyDeferredGBufferPrepassNode; -impl ViewNode for DeferredGBufferPrepassNode { +impl ViewNode for EarlyDeferredGBufferPrepassNode { + type ViewQuery = ::ViewQuery; + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + view_query: QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + run_deferred_prepass( + graph, + render_context, + view_query, + false, + world, + "early deferred prepass", + ) + } +} + +/// The phase of the prepass that runs after occlusion culling against the +/// meshes that were visible last frame. +/// +/// If occlusion culling isn't in use, this is a no-op. +/// +/// Like all prepass nodes, this is inserted before the main pass in the render +/// graph. 
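The `extract_camera_prepass_phase` hunk above replaces the old `insert_if` chain with explicit insert-or-remove branches: since render-world camera entities are now retained across frames, a prepass marker added in an earlier frame must also be removed once its flag turns off. A minimal sketch of that add-or-remove idea as a generic helper (hypothetical, not part of this PR):

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::system::EntityCommands;

/// Hypothetical helper: keep a marker component in sync with a boolean flag.
/// The old `insert_if` chain only ever added markers; on retained entities the
/// marker must also be removed when the flag is false, which is what the
/// expanded if/else blocks above do for each prepass component.
fn sync_marker<M: Component + Default>(entity: &mut EntityCommands, enabled: bool) {
    if enabled {
        entity.insert(M::default());
    } else {
        entity.remove::<M>();
    }
}
```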
+#[derive(Default)] +pub struct LateDeferredGBufferPrepassNode; + +impl ViewNode for LateDeferredGBufferPrepassNode { type ViewQuery = ( &'static ExtractedCamera, &'static ExtractedView, &'static ViewDepthTexture, &'static ViewPrepassTextures, + Has, + Has, ); fn run<'w>( &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, - (camera, extracted_view, view_depth_texture, view_prepass_textures): QueryItem< - 'w, - Self::ViewQuery, - >, + view_query: QueryItem<'w, Self::ViewQuery>, world: &'w World, ) -> Result<(), NodeRunError> { - let (Some(opaque_deferred_phases), Some(alpha_mask_deferred_phases)) = ( - world.get_resource::>(), - world.get_resource::>(), - ) else { + let (_, _, _, _, occlusion_culling, no_indirect_drawing) = view_query; + if !occlusion_culling || no_indirect_drawing { return Ok(()); - }; + } - let (Some(opaque_deferred_phase), Some(alpha_mask_deferred_phase)) = ( - opaque_deferred_phases.get(&extracted_view.retained_view_entity), - alpha_mask_deferred_phases.get(&extracted_view.retained_view_entity), - ) else { - return Ok(()); - }; + run_deferred_prepass( + graph, + render_context, + view_query, + true, + world, + "late deferred prepass", + ) + } +} - let mut color_attachments = vec![]; - color_attachments.push( - view_prepass_textures - .normal - .as_ref() - .map(|normals_texture| normals_texture.get_attachment()), - ); - color_attachments.push( - view_prepass_textures - .motion_vectors - .as_ref() - .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()), - ); +/// Runs the deferred prepass that draws all meshes to the depth buffer and +/// G-buffers. +/// +/// If occlusion culling isn't in use, and a prepass is enabled, then there's +/// only one prepass. If occlusion culling is in use, then any prepass is split +/// into two: an *early* prepass and a *late* prepass. The early prepass draws +/// what was visible last frame, and the last prepass performs occlusion culling +/// against a conservative hierarchical Z buffer before drawing unoccluded +/// meshes. +fn run_deferred_prepass<'w>( + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + (camera, extracted_view, view_depth_texture, view_prepass_textures, _, _): QueryItem< + 'w, + ::ViewQuery, + >, + is_late: bool, + world: &'w World, + label: &'static str, +) -> Result<(), NodeRunError> { + let (Some(opaque_deferred_phases), Some(alpha_mask_deferred_phases)) = ( + world.get_resource::>(), + world.get_resource::>(), + ) else { + return Ok(()); + }; - // If we clear the deferred texture with LoadOp::Clear(Default::default()) we get these errors: - // Chrome: GL_INVALID_OPERATION: No defined conversion between clear value and attachment format. - // Firefox: WebGL warning: clearBufferu?[fi]v: This attachment is of type FLOAT, but this function is of type UINT. 
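The new doc comments above describe the two-phase scheme: the early deferred prepass redraws what was visible last frame, the depth pyramid is rebuilt from that, and the late pass only draws meshes that survive the hierarchical-Z test. From the application side the opt-in looks unchanged by this split; a rough sketch, assuming the `OcclusionCulling`, `DepthPrepass`, and `DeferredPrepass` components remain the way to enable it:

```rust
use bevy::core_pipeline::prepass::{DeferredPrepass, DepthPrepass};
use bevy::prelude::*;
use bevy::render::experimental::occlusion_culling::OcclusionCulling;

/// Rough sketch of the opt-in, which this split does not change: with
/// `OcclusionCulling` present the early/late node pair runs, and with
/// `DeferredPrepass` the deferred variants above are the ones involved.
fn spawn_camera(mut commands: Commands) {
    commands.spawn((
        Camera3d::default(),
        DepthPrepass,
        DeferredPrepass,
        OcclusionCulling,
        Transform::from_xyz(0.0, 2.0, 8.0).looking_at(Vec3::ZERO, Vec3::Y),
    ));
}
```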
- // Appears to be unsupported: https://registry.khronos.org/webgl/specs/latest/2.0/#3.7.9 - // For webgl2 we fallback to manually clearing - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + let (Some(opaque_deferred_phase), Some(alpha_mask_deferred_phase)) = ( + opaque_deferred_phases.get(&extracted_view.retained_view_entity), + alpha_mask_deferred_phases.get(&extracted_view.retained_view_entity), + ) else { + return Ok(()); + }; + + let mut color_attachments = vec![]; + color_attachments.push( + view_prepass_textures + .normal + .as_ref() + .map(|normals_texture| normals_texture.get_attachment()), + ); + color_attachments.push( + view_prepass_textures + .motion_vectors + .as_ref() + .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()), + ); + + // If we clear the deferred texture with LoadOp::Clear(Default::default()) we get these errors: + // Chrome: GL_INVALID_OPERATION: No defined conversion between clear value and attachment format. + // Firefox: WebGL warning: clearBufferu?[fi]v: This attachment is of type FLOAT, but this function is of type UINT. + // Appears to be unsupported: https://registry.khronos.org/webgl/specs/latest/2.0/#3.7.9 + // For webgl2 we fallback to manually clearing + #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + if !is_late { if let Some(deferred_texture) = &view_prepass_textures.deferred { render_context.command_encoder().clear_texture( &deferred_texture.texture.texture, &bevy_render::render_resource::ImageSubresourceRange::default(), ); } + } - color_attachments.push( - view_prepass_textures - .deferred - .as_ref() - .map(|deferred_texture| { + color_attachments.push( + view_prepass_textures + .deferred + .as_ref() + .map(|deferred_texture| { + if is_late { + deferred_texture.get_attachment() + } else { #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] { bevy_render::render_resource::RenderPassColorAttachment { @@ -105,87 +181,82 @@ impl ViewNode for DeferredGBufferPrepassNode { feature = "webgpu" ))] deferred_texture.get_attachment() - }), - ); - - color_attachments.push( - view_prepass_textures - .deferred_lighting_pass_id - .as_ref() - .map(|deferred_lighting_pass_id| deferred_lighting_pass_id.get_attachment()), - ); - - // If all color attachments are none: clear the color attachment list so that no fragment shader is required - if color_attachments.iter().all(Option::is_none) { - color_attachments.clear(); - } - - let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store)); - - let view_entity = graph.view_entity(); - render_context.add_command_buffer_generation_task(move |render_device| { - #[cfg(feature = "trace")] - let _deferred_span = info_span!("deferred_prepass").entered(); - - // Command encoder setup - let mut command_encoder = - render_device.create_command_encoder(&CommandEncoderDescriptor { - label: Some("deferred_prepass_command_encoder"), - }); - - // Render pass setup - let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { - label: Some("deferred_prepass"), - color_attachments: &color_attachments, - depth_stencil_attachment, - timestamp_writes: None, - occlusion_query_set: None, - }); - let mut render_pass = TrackedRenderPass::new(&render_device, render_pass); - if let Some(viewport) = camera.viewport.as_ref() { - render_pass.set_camera_viewport(viewport); - } - - // Opaque draws - if !opaque_deferred_phase.multidrawable_mesh_keys.is_empty() - || 
!opaque_deferred_phase.batchable_mesh_keys.is_empty() - || !opaque_deferred_phase.unbatchable_mesh_keys.is_empty() - { - #[cfg(feature = "trace")] - let _opaque_prepass_span = info_span!("opaque_deferred_prepass").entered(); - if let Err(err) = opaque_deferred_phase.render(&mut render_pass, world, view_entity) - { - error!("Error encountered while rendering the opaque deferred phase {err:?}"); } - } + }), + ); - // Alpha masked draws - if !alpha_mask_deferred_phase.is_empty() { - #[cfg(feature = "trace")] - let _alpha_mask_deferred_span = info_span!("alpha_mask_deferred_prepass").entered(); - if let Err(err) = - alpha_mask_deferred_phase.render(&mut render_pass, world, view_entity) - { - error!( - "Error encountered while rendering the alpha mask deferred phase {err:?}" - ); - } - } + color_attachments.push( + view_prepass_textures + .deferred_lighting_pass_id + .as_ref() + .map(|deferred_lighting_pass_id| deferred_lighting_pass_id.get_attachment()), + ); - drop(render_pass); + // If all color attachments are none: clear the color attachment list so that no fragment shader is required + if color_attachments.iter().all(Option::is_none) { + color_attachments.clear(); + } - // After rendering to the view depth texture, copy it to the prepass depth texture - if let Some(prepass_depth_texture) = &view_prepass_textures.depth { - command_encoder.copy_texture_to_texture( - view_depth_texture.texture.as_image_copy(), - prepass_depth_texture.texture.texture.as_image_copy(), - view_prepass_textures.size, - ); - } + let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store)); - command_encoder.finish() + let view_entity = graph.view_entity(); + render_context.add_command_buffer_generation_task(move |render_device| { + #[cfg(feature = "trace")] + let _deferred_span = info_span!("deferred_prepass").entered(); + + // Command encoder setup + let mut command_encoder = render_device.create_command_encoder(&CommandEncoderDescriptor { + label: Some("deferred_prepass_command_encoder"), }); - Ok(()) - } + // Render pass setup + let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some(label), + color_attachments: &color_attachments, + depth_stencil_attachment, + timestamp_writes: None, + occlusion_query_set: None, + }); + let mut render_pass = TrackedRenderPass::new(&render_device, render_pass); + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); + } + + // Opaque draws + if !opaque_deferred_phase.multidrawable_meshes.is_empty() + || !opaque_deferred_phase.batchable_meshes.is_empty() + || !opaque_deferred_phase.unbatchable_meshes.is_empty() + { + #[cfg(feature = "trace")] + let _opaque_prepass_span = info_span!("opaque_deferred_prepass").entered(); + if let Err(err) = opaque_deferred_phase.render(&mut render_pass, world, view_entity) { + error!("Error encountered while rendering the opaque deferred phase {err:?}"); + } + } + + // Alpha masked draws + if !alpha_mask_deferred_phase.is_empty() { + #[cfg(feature = "trace")] + let _alpha_mask_deferred_span = info_span!("alpha_mask_deferred_prepass").entered(); + if let Err(err) = alpha_mask_deferred_phase.render(&mut render_pass, world, view_entity) + { + error!("Error encountered while rendering the alpha mask deferred phase {err:?}"); + } + } + + drop(render_pass); + + // After rendering to the view depth texture, copy it to the prepass depth texture + if let Some(prepass_depth_texture) = &view_prepass_textures.depth { + command_encoder.copy_texture_to_texture( 
+ view_depth_texture.texture.as_image_copy(), + prepass_depth_texture.texture.texture.as_image_copy(), + view_prepass_textures.size, + ); + } + + command_encoder.finish() + }); + + Ok(()) } diff --git a/crates/bevy_core_pipeline/src/dof/mod.rs b/crates/bevy_core_pipeline/src/dof/mod.rs index 028bc97350..87a10313f1 100644 --- a/crates/bevy_core_pipeline/src/dof/mod.rs +++ b/crates/bevy_core_pipeline/src/dof/mod.rs @@ -23,7 +23,7 @@ use bevy_ecs::{ query::{QueryItem, With}, reflect::ReflectComponent, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -79,7 +79,7 @@ pub struct DepthOfFieldPlugin; /// /// [depth of field]: https://en.wikipedia.org/wiki/Depth_of_field #[derive(Component, Clone, Copy, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Clone, Default)] pub struct DepthOfField { /// The appearance of the effect. pub mode: DepthOfFieldMode, @@ -123,7 +123,7 @@ pub struct DepthOfField { /// Controls the appearance of the effect. #[derive(Clone, Copy, Default, PartialEq, Debug, Reflect)] -#[reflect(Default, PartialEq)] +#[reflect(Default, Clone, PartialEq)] pub enum DepthOfFieldMode { /// A more accurate simulation, in which circles of confusion generate /// "spots" of light. diff --git a/crates/bevy_core_pipeline/src/experimental/mip_generation/downsample_depth.wgsl b/crates/bevy_core_pipeline/src/experimental/mip_generation/downsample_depth.wgsl index d24afa390d..12a4d2b178 100644 --- a/crates/bevy_core_pipeline/src/experimental/mip_generation/downsample_depth.wgsl +++ b/crates/bevy_core_pipeline/src/experimental/mip_generation/downsample_depth.wgsl @@ -1,8 +1,8 @@ #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT -@group(0) @binding(0) var mip_0: array; // Per pixel +@group(0) @binding(0) var mip_0: texture_storage_2d; #else #ifdef MESHLET -@group(0) @binding(0) var mip_0: array; // Per pixel +@group(0) @binding(0) var mip_0: texture_storage_2d; #else // MESHLET #ifdef MULTISAMPLE @group(0) @binding(0) var mip_0: texture_depth_multisampled_2d; @@ -24,7 +24,7 @@ @group(0) @binding(11) var mip_11: texture_storage_2d; @group(0) @binding(12) var mip_12: texture_storage_2d; @group(0) @binding(13) var samplr: sampler; -struct Constants { max_mip_level: u32, view_width: u32 } +struct Constants { max_mip_level: u32 } var constants: Constants; /// Generates a hierarchical depth buffer. @@ -39,7 +39,6 @@ var intermediate_memory: array, 16>; @compute @workgroup_size(256, 1, 1) fn downsample_depth_first( - @builtin(num_workgroups) num_workgroups: vec3u, @builtin(workgroup_id) workgroup_id: vec3u, @builtin(local_invocation_index) local_invocation_index: u32, ) { @@ -309,12 +308,13 @@ fn reduce_load_mip_6(tex: vec2u) -> f32 { } fn load_mip_0(x: u32, y: u32) -> f32 { - let i = y * constants.view_width + x; #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - return bitcast(u32(mip_0[i] >> 32u)); + let visibility = textureLoad(mip_0, vec2(x, y)).r; + return bitcast(u32(visibility >> 32u)); #else // MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT #ifdef MESHLET - return bitcast(mip_0[i]); + let visibility = textureLoad(mip_0, vec2(x, y)).r; + return bitcast(visibility); #else // MESHLET // Downsample the top level. 
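In the `load_mip_0` hunk above, the meshlet path now reads the packed visibility value with `textureLoad` and reinterprets its upper 32 bits as depth, rather than indexing a flat array with `y * view_width + x` (which is also why the `view_width` push constant disappears). Expressed as Rust for clarity, the unpacking amounts to:

```rust
/// Rust rendering of the shader's unpacking above, for illustration only: the
/// meshlet visibility value packs depth in its upper 32 bits, and the
/// downsample shader reinterprets those bits as an f32.
fn depth_from_packed_visibility(visibility: u64) -> f32 {
    f32::from_bits((visibility >> 32) as u32)
}
```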
#ifdef MULTISAMPLE diff --git a/crates/bevy_core_pipeline/src/experimental/mip_generation/mod.rs b/crates/bevy_core_pipeline/src/experimental/mip_generation/mod.rs index 4ad5a7d36b..cd2099e49e 100644 --- a/crates/bevy_core_pipeline/src/experimental/mip_generation/mod.rs +++ b/crates/bevy_core_pipeline/src/experimental/mip_generation/mod.rs @@ -7,6 +7,10 @@ use core::array; +use crate::core_3d::{ + graph::{Core3d, Node3d}, + prepare_core_3d_depth_textures, +}; use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_derive::{Deref, DerefMut}; @@ -14,40 +18,36 @@ use bevy_ecs::{ component::Component, entity::Entity, prelude::{resource_exists, Without}, - query::{QueryItem, With}, + query::{Or, QueryState, With}, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{lifetimeless::Read, Commands, Local, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_math::{uvec2, UVec2, Vec4Swizzles as _}; +use bevy_render::batching::gpu_preprocessing::GpuPreprocessingSupport; use bevy_render::{ - experimental::occlusion_culling::OcclusionCulling, - render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, + experimental::occlusion_culling::{ + OcclusionCulling, OcclusionCullingSubview, OcclusionCullingSubviewEntities, + }, + render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext}, render_resource::{ binding_types::{sampler, texture_2d, texture_2d_multisampled, texture_storage_2d}, BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutEntries, CachedComputePipelineId, ComputePassDescriptor, ComputePipeline, ComputePipelineDescriptor, - DownlevelFlags, Extent3d, IntoBinding, PipelineCache, PushConstantRange, Sampler, - SamplerBindingType, SamplerDescriptor, Shader, ShaderStages, SpecializedComputePipeline, + Extent3d, IntoBinding, PipelineCache, PushConstantRange, Sampler, SamplerBindingType, + SamplerDescriptor, Shader, ShaderStages, SpecializedComputePipeline, SpecializedComputePipelines, StorageTextureAccess, TextureAspect, TextureDescriptor, TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureView, TextureViewDescriptor, TextureViewDimension, }, - renderer::{RenderAdapter, RenderContext, RenderDevice}, + renderer::{RenderContext, RenderDevice}, texture::TextureCache, view::{ExtractedView, NoIndirectDrawing, ViewDepthTexture}, Render, RenderApp, RenderSet, }; use bitflags::bitflags; - -use crate::{ - core_3d::{ - graph::{Core3d, Node3d}, - prepare_core_3d_depth_textures, - }, - prepass::{DeferredPrepass, DepthPrepass}, -}; +use tracing::debug; /// Identifies the `downsample_depth.wgsl` shader. pub const DOWNSAMPLE_DEPTH_SHADER_HANDLE: Handle = @@ -81,21 +81,16 @@ impl Plugin for MipGenerationPlugin { render_app .init_resource::>() - .add_render_graph_node::>( - Core3d, - Node3d::EarlyDownsampleDepth, - ) - .add_render_graph_node::>( - Core3d, - Node3d::LateDownsampleDepth, - ) + .add_render_graph_node::(Core3d, Node3d::EarlyDownsampleDepth) + .add_render_graph_node::(Core3d, Node3d::LateDownsampleDepth) .add_render_graph_edges( Core3d, ( Node3d::EarlyPrepass, + Node3d::EarlyDeferredPrepass, Node3d::EarlyDownsampleDepth, Node3d::LatePrepass, - Node3d::DeferredPrepass, + Node3d::LateDeferredPrepass, ), ) .add_render_graph_edges( @@ -136,7 +131,7 @@ impl Plugin for MipGenerationPlugin { /// /// This runs the single-pass downsampling (SPD) shader with the *min* filter in /// order to generate a series of mipmaps for the Z buffer. 
The resulting -/// hierarchical Z buffer can be used for occlusion culling. +/// hierarchical Z-buffer can be used for occlusion culling. /// /// There are two instances of this node. The *early* downsample depth pass is /// the first hierarchical Z-buffer stage, which runs after the early prepass @@ -147,79 +142,150 @@ impl Plugin for MipGenerationPlugin { /// of the *next* frame will perform. /// /// This node won't do anything if occlusion culling isn't on. -#[derive(Default)] -pub struct DownsampleDepthNode; - -impl ViewNode for DownsampleDepthNode { - type ViewQuery = ( +pub struct DownsampleDepthNode { + /// The query that we use to find views that need occlusion culling for + /// their Z-buffer. + main_view_query: QueryState<( Read, Read, Read, - ); + Option>, + )>, + /// The query that we use to find shadow maps that need occlusion culling. + shadow_view_query: QueryState<( + Read, + Read, + Read, + )>, +} + +impl FromWorld for DownsampleDepthNode { + fn from_world(world: &mut World) -> Self { + Self { + main_view_query: QueryState::new(world), + shadow_view_query: QueryState::new(world), + } + } +} + +impl Node for DownsampleDepthNode { + fn update(&mut self, world: &mut World) { + self.main_view_query.update_archetypes(world); + self.shadow_view_query.update_archetypes(world); + } fn run<'w>( &self, render_graph_context: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, - (view_depth_pyramid, view_downsample_depth_bind_group, view_depth_texture): QueryItem< - 'w, - Self::ViewQuery, - >, world: &'w World, ) -> Result<(), NodeRunError> { - // Produce a depth pyramid from the current depth buffer for a single - // view. The resulting depth pyramid can be used for occlusion testing. - - let downsample_depth_pipelines = world.resource::(); - let pipeline_cache = world.resource::(); - - // Despite the name "single-pass downsampling", we actually need two - // passes because of the lack of `coherent` buffers in WGPU/WGSL. - // Between each pass, there's an implicit synchronization barrier. - - // Fetch the appropriate pipeline ID, depending on whether the depth - // buffer is multisampled or not. - let (Some(first_downsample_depth_pipeline_id), Some(second_downsample_depth_pipeline_id)) = - (if view_depth_texture.texture.sample_count() > 1 { - ( - downsample_depth_pipelines.first_multisample.pipeline_id, - downsample_depth_pipelines.second_multisample.pipeline_id, - ) - } else { - ( - downsample_depth_pipelines.first.pipeline_id, - downsample_depth_pipelines.second.pipeline_id, - ) - }) + let Ok(( + view_depth_pyramid, + view_downsample_depth_bind_group, + view_depth_texture, + maybe_view_light_entities, + )) = self + .main_view_query + .get_manual(world, render_graph_context.view_entity()) else { return Ok(()); }; - // Fetch the pipelines for the two passes. - let (Some(first_downsample_depth_pipeline), Some(second_downsample_depth_pipeline)) = ( - pipeline_cache.get_compute_pipeline(first_downsample_depth_pipeline_id), - pipeline_cache.get_compute_pipeline(second_downsample_depth_pipeline_id), - ) else { - return Ok(()); - }; - - // Run the depth downsampling. - let view_size = uvec2( - view_depth_texture.texture.width(), - view_depth_texture.texture.height(), - ); - view_depth_pyramid.downsample_depth( - &format!("{:?}", render_graph_context.label()), + // Downsample depth for the main Z-buffer. 
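To make the *min* filter comment above concrete: each hierarchical-Z mip stores, per texel, the minimum depth of the 2x2 block beneath it, and with Bevy's reverse-Z convention the minimum is the farthest surface, so a test against a coarse mip can never wrongly report something as occluded. A CPU-side sketch of a single downsample step (illustration only, not the SPD shader):

```rust
/// CPU-side illustration of one hierarchical-Z downsample step: every output
/// texel keeps the minimum depth of its 2x2 footprint. With reverse-Z the
/// minimum is the farthest surface, which keeps the buffer conservative for
/// occlusion tests. Assumes even dimensions for brevity.
fn downsample_min(src: &[f32], width: usize, height: usize) -> Vec<f32> {
    let (w, h) = (width / 2, height / 2);
    let mut dst = vec![0.0f32; w * h];
    for y in 0..h {
        for x in 0..w {
            let row0 = 2 * y * width + 2 * x;
            let row1 = (2 * y + 1) * width + 2 * x;
            dst[y * w + x] = src[row0]
                .min(src[row0 + 1])
                .min(src[row1])
                .min(src[row1 + 1]);
        }
    }
    dst
}
```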
+ downsample_depth( + render_graph_context, render_context, - view_size, + world, + view_depth_pyramid, view_downsample_depth_bind_group, - first_downsample_depth_pipeline, - second_downsample_depth_pipeline, - ); + uvec2( + view_depth_texture.texture.width(), + view_depth_texture.texture.height(), + ), + view_depth_texture.texture.sample_count(), + )?; + + // Downsample depth for shadow maps that have occlusion culling enabled. + if let Some(view_light_entities) = maybe_view_light_entities { + for &view_light_entity in &view_light_entities.0 { + let Ok((view_depth_pyramid, view_downsample_depth_bind_group, occlusion_culling)) = + self.shadow_view_query.get_manual(world, view_light_entity) + else { + continue; + }; + downsample_depth( + render_graph_context, + render_context, + world, + view_depth_pyramid, + view_downsample_depth_bind_group, + UVec2::splat(occlusion_culling.depth_texture_size), + 1, + )?; + } + } + Ok(()) } } +/// Produces a depth pyramid from the current depth buffer for a single view. +/// The resulting depth pyramid can be used for occlusion testing. +fn downsample_depth<'w>( + render_graph_context: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + view_depth_pyramid: &ViewDepthPyramid, + view_downsample_depth_bind_group: &ViewDownsampleDepthBindGroup, + view_size: UVec2, + sample_count: u32, +) -> Result<(), NodeRunError> { + let downsample_depth_pipelines = world.resource::(); + let pipeline_cache = world.resource::(); + + // Despite the name "single-pass downsampling", we actually need two + // passes because of the lack of `coherent` buffers in WGPU/WGSL. + // Between each pass, there's an implicit synchronization barrier. + + // Fetch the appropriate pipeline ID, depending on whether the depth + // buffer is multisampled or not. + let (Some(first_downsample_depth_pipeline_id), Some(second_downsample_depth_pipeline_id)) = + (if sample_count > 1 { + ( + downsample_depth_pipelines.first_multisample.pipeline_id, + downsample_depth_pipelines.second_multisample.pipeline_id, + ) + } else { + ( + downsample_depth_pipelines.first.pipeline_id, + downsample_depth_pipelines.second.pipeline_id, + ) + }) + else { + return Ok(()); + }; + + // Fetch the pipelines for the two passes. + let (Some(first_downsample_depth_pipeline), Some(second_downsample_depth_pipeline)) = ( + pipeline_cache.get_compute_pipeline(first_downsample_depth_pipeline_id), + pipeline_cache.get_compute_pipeline(second_downsample_depth_pipeline_id), + ) else { + return Ok(()); + }; + + // Run the depth downsampling. + view_depth_pyramid.downsample_depth( + &format!("{:?}", render_graph_context.label()), + render_context, + view_size, + view_downsample_depth_bind_group, + first_downsample_depth_pipeline, + second_downsample_depth_pipeline, + ); + Ok(()) +} + /// A single depth downsample pipeline. #[derive(Resource)] pub struct DownsampleDepthPipeline { @@ -265,9 +331,9 @@ pub struct DownsampleDepthPipelines { fn create_downsample_depth_pipelines( mut commands: Commands, render_device: Res, - render_adapter: Res, pipeline_cache: Res, mut specialized_compute_pipelines: ResMut>, + gpu_preprocessing_support: Res, mut has_run: Local, ) { // Only run once. @@ -279,13 +345,8 @@ fn create_downsample_depth_pipelines( } *has_run = true; - // If we don't have compute shaders, we can't invoke the downsample depth - // compute shader. 
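`DownsampleDepthNode` above moves from `ViewNode` to a hand-rolled `Node` so that, in addition to the main view, it can reach the shadow-map sub-views listed in `OcclusionCullingSubviewEntities`. The enabling pattern is a `QueryState` stored on the node, refreshed in `update`, and read with `get_manual` in `run`. A stripped-down sketch of that pattern with an invented `ExampleViewData` component (hypothetical, not part of this PR):

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::query::QueryState;
use bevy_ecs::world::{FromWorld, World};
use bevy_render::render_graph::{Node, NodeRunError, RenderGraphContext};
use bevy_render::renderer::RenderContext;

/// Hypothetical render-world component, for illustration only.
#[derive(Component)]
struct ExampleViewData {
    label: &'static str,
}

/// Stripped-down sketch of the pattern above: a plain `Node` owning a
/// `QueryState`, which lets it look up entities beyond the single view entity
/// that a `ViewNode` receives (here it only reads the view itself).
struct ExampleNode {
    view_query: QueryState<&'static ExampleViewData>,
}

impl FromWorld for ExampleNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl Node for ExampleNode {
    // A `QueryState` caches archetype access, so it has to be refreshed here;
    // `run` only receives `&World` and cannot do this itself.
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        _render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        if let Ok(data) = self.view_query.get_manual(world, graph.view_entity()) {
            // A real node would encode GPU work for this entity here.
            let _ = data.label;
        }
        Ok(())
    }
}
```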
- if !render_adapter - .get_downlevel_capabilities() - .flags - .contains(DownlevelFlags::COMPUTE_SHADERS) - { + if !gpu_preprocessing_support.is_culling_supported() { + debug!("Downsample depth is not supported on this platform."); return; } @@ -427,7 +488,7 @@ impl SpecializedComputePipeline for DownsampleDepthPipeline { layout: vec![self.bind_group_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, shader_defs, @@ -627,9 +688,8 @@ impl ViewDepthPyramid { timestamp_writes: None, }); downsample_pass.set_pipeline(downsample_depth_first_pipeline); - // Pass the mip count and the texture width as push constants, for - // simplicity. - downsample_pass.set_push_constants(0, bytemuck::cast_slice(&[self.mip_count, view_size.x])); + // Pass the mip count as a push constant, for simplicity. + downsample_pass.set_push_constants(0, &self.mip_count.to_le_bytes()); downsample_pass.set_bind_group(0, downsample_depth_bind_group, &[]); downsample_pass.dispatch_workgroups(view_size.x.div_ceil(64), view_size.y.div_ceil(64), 1); @@ -641,20 +701,12 @@ impl ViewDepthPyramid { } /// Creates depth pyramids for views that have occlusion culling enabled. -fn prepare_view_depth_pyramids( +pub fn prepare_view_depth_pyramids( mut commands: Commands, render_device: Res, mut texture_cache: ResMut, depth_pyramid_dummy_texture: Res, - views: Query< - (Entity, &ExtractedView), - ( - With, - Without, - With, - Without, - ), - >, + views: Query<(Entity, &ExtractedView), (With, Without)>, ) { for (view_entity, view) in &views { commands.entity(view_entity).insert(ViewDepthPyramid::new( @@ -681,10 +733,21 @@ fn prepare_downsample_depth_view_bind_groups( mut commands: Commands, render_device: Res, downsample_depth_pipelines: Res, - view_depth_textures: Query<(Entity, &ViewDepthPyramid, &ViewDepthTexture)>, + view_depth_textures: Query< + ( + Entity, + &ViewDepthPyramid, + Option<&ViewDepthTexture>, + Option<&OcclusionCullingSubview>, + ), + Or<(With, With)>, + >, ) { - for (view_entity, view_depth_pyramid, view_depth_texture) in &view_depth_textures { - let is_multisampled = view_depth_texture.texture.sample_count() > 1; + for (view_entity, view_depth_pyramid, view_depth_texture, shadow_occlusion_culling) in + &view_depth_textures + { + let is_multisampled = view_depth_texture + .is_some_and(|view_depth_texture| view_depth_texture.texture.sample_count() > 1); commands .entity(view_entity) .insert(ViewDownsampleDepthBindGroup( @@ -702,7 +765,13 @@ fn prepare_downsample_depth_view_bind_groups( } else { &downsample_depth_pipelines.first.bind_group_layout }, - view_depth_texture.view(), + match (view_depth_texture, shadow_occlusion_culling) { + (Some(view_depth_texture), _) => view_depth_texture.view(), + (None, Some(shadow_occlusion_culling)) => { + &shadow_occlusion_culling.depth_texture_view + } + (None, None) => panic!("Should never happen"), + }, &downsample_depth_pipelines.sampler, ), )); diff --git a/crates/bevy_core_pipeline/src/experimental/mod.rs b/crates/bevy_core_pipeline/src/experimental/mod.rs index 4f957477ea..071eb97d86 100644 --- a/crates/bevy_core_pipeline/src/experimental/mod.rs +++ b/crates/bevy_core_pipeline/src/experimental/mod.rs @@ -5,7 +5,3 @@ //! are included nonetheless for testing purposes. 
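The pipeline and dispatch hunks above shrink the push-constant range from `0..8` to `0..4` because `view_width` is no longer uploaded; only `mip_count` remains, and a single little-endian `u32` is exactly four bytes. A trivial check, for illustration:

```rust
fn main() {
    // With `view_width` gone, the push-constant block is a single u32, so the
    // pipeline's `range: 0..4` and the `to_le_bytes()` upload above agree.
    let mip_count: u32 = 7;
    assert_eq!(mip_count.to_le_bytes().len(), 4);
}
```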
pub mod mip_generation; - -pub mod taa { - pub use crate::taa::{TemporalAntiAliasNode, TemporalAntiAliasPlugin, TemporalAntiAliasing}; -} diff --git a/crates/bevy_core_pipeline/src/lib.rs b/crates/bevy_core_pipeline/src/lib.rs index 49b9b7a20b..9e04614276 100644 --- a/crates/bevy_core_pipeline/src/lib.rs +++ b/crates/bevy_core_pipeline/src/lib.rs @@ -9,22 +9,18 @@ pub mod auto_exposure; pub mod blit; pub mod bloom; -pub mod contrast_adaptive_sharpening; pub mod core_2d; pub mod core_3d; pub mod deferred; pub mod dof; pub mod experimental; pub mod fullscreen_vertex_shader; -pub mod fxaa; pub mod motion_blur; pub mod msaa_writeback; pub mod oit; pub mod post_process; pub mod prepass; mod skybox; -pub mod smaa; -mod taa; pub mod tonemapping; pub mod upscaling; @@ -41,19 +37,16 @@ pub mod prelude { use crate::{ blit::BlitPlugin, bloom::BloomPlugin, - contrast_adaptive_sharpening::CasPlugin, core_2d::Core2dPlugin, core_3d::Core3dPlugin, deferred::copy_lighting_id::CopyDeferredLightingIdPlugin, dof::DepthOfFieldPlugin, experimental::mip_generation::MipGenerationPlugin, fullscreen_vertex_shader::FULLSCREEN_SHADER_HANDLE, - fxaa::FxaaPlugin, motion_blur::MotionBlurPlugin, msaa_writeback::MsaaWritebackPlugin, post_process::PostProcessingPlugin, prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass}, - smaa::SmaaPlugin, tonemapping::TonemappingPlugin, upscaling::UpscalingPlugin, }; @@ -85,11 +78,8 @@ impl Plugin for CorePipelinePlugin { TonemappingPlugin, UpscalingPlugin, BloomPlugin, - FxaaPlugin, - CasPlugin, MotionBlurPlugin, DepthOfFieldPlugin, - SmaaPlugin, PostProcessingPlugin, OrderIndependentTransparencyPlugin, MipGenerationPlugin, diff --git a/crates/bevy_core_pipeline/src/motion_blur/mod.rs b/crates/bevy_core_pipeline/src/motion_blur/mod.rs index 7703698f1a..5898f1a8c5 100644 --- a/crates/bevy_core_pipeline/src/motion_blur/mod.rs +++ b/crates/bevy_core_pipeline/src/motion_blur/mod.rs @@ -9,10 +9,10 @@ use crate::{ use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::{ - component::{require, Component}, - query::With, + component::Component, + query::{QueryItem, With}, reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ @@ -56,9 +56,8 @@ pub mod pipeline; /// )); /// # } /// ```` -#[derive(Reflect, Component, Clone, ExtractComponent, ShaderType)] -#[reflect(Component, Default)] -#[extract_component_filter(With)] +#[derive(Reflect, Component, Clone)] +#[reflect(Component, Default, Clone)] #[require(DepthPrepass, MotionVectorPrepass)] pub struct MotionBlur { /// The strength of motion blur from `0.0` to `1.0`. @@ -91,9 +90,6 @@ pub struct MotionBlur { /// Setting this to `3` will result in `3 * 2 + 1 = 7` samples. Setting this to `0` is /// equivalent to disabling motion blur. pub samples: u32, - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] - // WebGL2 structs must be 16 byte aligned. 
- pub _webgl2_padding: bevy_math::Vec2, } impl Default for MotionBlur { @@ -101,12 +97,35 @@ impl Default for MotionBlur { Self { shutter_angle: 0.5, samples: 1, - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] - _webgl2_padding: Default::default(), } } } +impl ExtractComponent for MotionBlur { + type QueryData = &'static Self; + type QueryFilter = With; + type Out = MotionBlurUniform; + + fn extract_component(item: QueryItem) -> Option { + Some(MotionBlurUniform { + shutter_angle: item.shutter_angle, + samples: item.samples, + #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + _webgl2_padding: Default::default(), + }) + } +} + +#[doc(hidden)] +#[derive(Component, ShaderType, Clone)] +pub struct MotionBlurUniform { + shutter_angle: f32, + samples: u32, + #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding: bevy_math::Vec2, +} + pub const MOTION_BLUR_SHADER_HANDLE: Handle = weak_handle!("d9ca74af-fa0a-4f11-b0f2-19613b618b93"); @@ -122,7 +141,7 @@ impl Plugin for MotionBlurPlugin { ); app.add_plugins(( ExtractComponentPlugin::::default(), - UniformComponentPlugin::::default(), + UniformComponentPlugin::::default(), )); let Some(render_app) = app.get_sub_app_mut(RenderApp) else { diff --git a/crates/bevy_core_pipeline/src/motion_blur/node.rs b/crates/bevy_core_pipeline/src/motion_blur/node.rs index 2497bd633d..ade5f50d77 100644 --- a/crates/bevy_core_pipeline/src/motion_blur/node.rs +++ b/crates/bevy_core_pipeline/src/motion_blur/node.rs @@ -15,7 +15,7 @@ use crate::prepass::ViewPrepassTextures; use super::{ pipeline::{MotionBlurPipeline, MotionBlurPipelineId}, - MotionBlur, + MotionBlurUniform, }; #[derive(Default)] @@ -26,7 +26,7 @@ impl ViewNode for MotionBlurNode { &'static ViewTarget, &'static MotionBlurPipelineId, &'static ViewPrepassTextures, - &'static MotionBlur, + &'static MotionBlurUniform, &'static Msaa, ); fn run( @@ -42,7 +42,7 @@ impl ViewNode for MotionBlurNode { let motion_blur_pipeline = world.resource::(); let pipeline_cache = world.resource::(); - let settings_uniforms = world.resource::>(); + let settings_uniforms = world.resource::>(); let Some(pipeline) = pipeline_cache.get_render_pipeline(pipeline_id.0) else { return Ok(()); }; diff --git a/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs b/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs index 61bb7b60ce..4eab4ff7a6 100644 --- a/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs +++ b/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs @@ -26,7 +26,7 @@ use bevy_render::{ use crate::fullscreen_vertex_shader::fullscreen_shader_vertex_state; -use super::{MotionBlur, MOTION_BLUR_SHADER_HANDLE}; +use super::{MotionBlurUniform, MOTION_BLUR_SHADER_HANDLE}; #[derive(Resource)] pub struct MotionBlurPipeline { @@ -49,7 +49,7 @@ impl MotionBlurPipeline { // Linear Sampler sampler(SamplerBindingType::Filtering), // Motion blur settings uniform input - uniform_buffer_sized(false, Some(MotionBlur::min_size())), + uniform_buffer_sized(false, Some(MotionBlurUniform::min_size())), // Globals uniform input uniform_buffer_sized(false, Some(GlobalsUniform::min_size())), ), @@ -67,7 +67,7 @@ impl MotionBlurPipeline { // Linear Sampler sampler(SamplerBindingType::Filtering), // Motion blur settings uniform input - uniform_buffer_sized(false, Some(MotionBlur::min_size())), + uniform_buffer_sized(false, Some(MotionBlurUniform::min_size())), // Globals uniform input 
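The `MotionBlur` hunks above split the user-facing component from the GPU uniform: the component users edit loses its WebGL2 padding, and a manual `ExtractComponent` impl emits a separate `MotionBlurUniform` for the render world, which is what `UniformComponentPlugin` and the pipeline bind from here on. A minimal sketch of the same pattern with invented `Vignette` types (hypothetical, not part of Bevy):

```rust
use bevy_ecs::component::Component;
use bevy_ecs::query::{QueryItem, With};
use bevy_render::camera::Camera;
use bevy_render::extract_component::ExtractComponent;
use bevy_render::render_resource::ShaderType;

/// Hypothetical main-world component: what users tweak.
#[derive(Component, Clone)]
pub struct Vignette {
    pub strength: f32,
}

/// Hypothetical render-world uniform: the only type that needs `ShaderType`
/// (and any alignment padding a target platform demands).
#[derive(Component, ShaderType, Clone)]
pub struct VignetteUniform {
    pub strength: f32,
}

impl ExtractComponent for Vignette {
    type QueryData = &'static Self;
    type QueryFilter = With<Camera>;
    // Extraction may output a different component than the one it reads.
    type Out = VignetteUniform;

    fn extract_component(item: QueryItem<'_, Self::QueryData>) -> Option<Self::Out> {
        Some(VignetteUniform {
            strength: item.strength.clamp(0.0, 1.0),
        })
    }
}
```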
uniform_buffer_sized(false, Some(GlobalsUniform::min_size())), ), @@ -155,7 +155,7 @@ pub(crate) fn prepare_motion_blur_pipelines( pipeline_cache: Res, mut pipelines: ResMut>, pipeline: Res, - views: Query<(Entity, &ExtractedView, &Msaa), With>, + views: Query<(Entity, &ExtractedView, &Msaa), With>, ) { for (entity, view, msaa) in &views { let pipeline_id = pipelines.specialize( diff --git a/crates/bevy_core_pipeline/src/oit/mod.rs b/crates/bevy_core_pipeline/src/oit/mod.rs index 63083f5ed7..6a15fd126c 100644 --- a/crates/bevy_core_pipeline/src/oit/mod.rs +++ b/crates/bevy_core_pipeline/src/oit/mod.rs @@ -4,9 +4,9 @@ use bevy_app::prelude::*; use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::{component::*, prelude::*}; use bevy_math::UVec2; -use bevy_platform_support::collections::HashSet; -use bevy_platform_support::time::Instant; -use bevy_reflect::Reflect; +use bevy_platform::collections::HashSet; +use bevy_platform::time::Instant; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ camera::{Camera, ExtractedCamera}, extract_component::{ExtractComponent, ExtractComponentPlugin}, @@ -44,6 +44,7 @@ pub const OIT_DRAW_SHADER_HANDLE: Handle = // This should probably be done by adding an enum to this component. // We use the same struct to pass on the settings to the drawing shader. #[derive(Clone, Copy, ExtractComponent, Reflect, ShaderType)] +#[reflect(Clone, Default)] pub struct OrderIndependentTransparencySettings { /// Controls how many layers will be used to compute the blending. /// The more layers you use the more memory it will use but it will also give better results. @@ -162,7 +163,7 @@ fn configure_depth_texture_usages( } // Find all the render target that potentially uses OIT - let primary_window = p.get_single().ok(); + let primary_window = p.single().ok(); let mut render_target_has_oit = >::default(); for (camera, has_oit) in &cameras { if has_oit { diff --git a/crates/bevy_core_pipeline/src/oit/resolve/mod.rs b/crates/bevy_core_pipeline/src/oit/resolve/mod.rs index f73192b19d..7db98650fd 100644 --- a/crates/bevy_core_pipeline/src/oit/resolve/mod.rs +++ b/crates/bevy_core_pipeline/src/oit/resolve/mod.rs @@ -6,7 +6,7 @@ use bevy_app::Plugin; use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_derive::Deref; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, hash_set::EntityHashSet}, + entity::{EntityHashMap, EntityHashSet}, prelude::*, }; use bevy_image::BevyDefault as _; @@ -33,6 +33,9 @@ pub const OIT_RESOLVE_SHADER_HANDLE: Handle = /// Contains the render node used to run the resolve pass. pub mod node; +/// Minimum required value of `wgpu::Limits::max_storage_buffers_per_shader_stage`. +pub const OIT_REQUIRED_STORAGE_BUFFERS: u32 = 2; + /// Plugin needed to resolve the Order Independent Transparency (OIT) buffer to the screen. pub struct OitResolvePlugin; impl Plugin for OitResolvePlugin { @@ -50,14 +53,11 @@ impl Plugin for OitResolvePlugin { return; }; - if !render_app - .world() - .resource::() - .get_downlevel_capabilities() - .flags - .contains(DownlevelFlags::FRAGMENT_WRITABLE_STORAGE) - { - warn!("OrderIndependentTransparencyPlugin not loaded. 
GPU lacks support: DownlevelFlags::FRAGMENT_WRITABLE_STORAGE."); + if !is_oit_supported( + render_app.world().resource::(), + render_app.world().resource::(), + true, + ) { return; } @@ -73,6 +73,34 @@ impl Plugin for OitResolvePlugin { } } +pub fn is_oit_supported(adapter: &RenderAdapter, device: &RenderDevice, warn: bool) -> bool { + if !adapter + .get_downlevel_capabilities() + .flags + .contains(DownlevelFlags::FRAGMENT_WRITABLE_STORAGE) + { + if warn { + warn!("OrderIndependentTransparencyPlugin not loaded. GPU lacks support: DownlevelFlags::FRAGMENT_WRITABLE_STORAGE."); + } + return false; + } + + let max_storage_buffers_per_shader_stage = device.limits().max_storage_buffers_per_shader_stage; + + if max_storage_buffers_per_shader_stage < OIT_REQUIRED_STORAGE_BUFFERS { + if warn { + warn!( + max_storage_buffers_per_shader_stage, + OIT_REQUIRED_STORAGE_BUFFERS, + "OrderIndependentTransparencyPlugin not loaded. RenderDevice lacks support: max_storage_buffers_per_shader_stage < OIT_REQUIRED_STORAGE_BUFFERS." + ); + } + return false; + } + + true +} + /// Bind group for the OIT resolve pass. #[derive(Resource, Deref)] pub struct OitResolveBindGroup(pub BindGroup); diff --git a/crates/bevy_core_pipeline/src/post_process/mod.rs b/crates/bevy_core_pipeline/src/post_process/mod.rs index c8b1da4497..2ac03c08c8 100644 --- a/crates/bevy_core_pipeline/src/post_process/mod.rs +++ b/crates/bevy_core_pipeline/src/post_process/mod.rs @@ -11,7 +11,7 @@ use bevy_ecs::{ query::{QueryItem, With}, reflect::ReflectComponent, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -98,7 +98,7 @@ pub struct PostProcessingPlugin; /// /// [Gjøl & Svendsen 2016]: https://github.com/playdeadgames/publications/blob/master/INSIDE/rendering_inside_gdc2016.pdf #[derive(Reflect, Component, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct ChromaticAberration { /// The lookup texture that determines the color gradient. /// diff --git a/crates/bevy_core_pipeline/src/prepass/mod.rs b/crates/bevy_core_pipeline/src/prepass/mod.rs index 1e663a79a4..deea2a5fa8 100644 --- a/crates/bevy_core_pipeline/src/prepass/mod.rs +++ b/crates/bevy_core_pipeline/src/prepass/mod.rs @@ -54,18 +54,18 @@ pub const MOTION_VECTOR_PREPASS_FORMAT: TextureFormat = TextureFormat::Rg16Float /// If added to a [`crate::prelude::Camera3d`] then depth values will be copied to a separate texture available to the main pass. #[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct DepthPrepass; /// If added to a [`crate::prelude::Camera3d`] then vertex world normals will be copied to a separate texture available to the main pass. /// Normals will have normal map textures already applied. #[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct NormalPrepass; /// If added to a [`crate::prelude::Camera3d`] then screen space motion vectors will be copied to a separate texture available to the main pass. #[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct MotionVectorPrepass; /// If added to a [`crate::prelude::Camera3d`] then deferred materials will be rendered to the deferred gbuffer texture and will be available to subsequent passes. 
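With `is_oit_supported` above, the capability check (writable storage in fragment shaders plus at least `OIT_REQUIRED_STORAGE_BUFFERS` storage buffers per stage) now only gates whether the resolve plugin loads; enabling the effect remains a per-camera decision. A rough sketch of that per-camera opt-in, assuming the component's existing `layer_count` field:

```rust
use bevy::core_pipeline::oit::OrderIndependentTransparencySettings;
use bevy::prelude::*;

/// Rough sketch: OIT stays opt-in per camera; the new capability check above
/// only decides whether the resolve plugin is registered at all.
fn spawn_oit_camera(mut commands: Commands) {
    commands.spawn((
        Camera3d::default(),
        OrderIndependentTransparencySettings {
            // More layers cost more memory (and rely on the storage buffers
            // checked above) but blend overlapping transparents more accurately.
            layer_count: 16,
            ..Default::default()
        },
    ));
}
```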
diff --git a/crates/bevy_core_pipeline/src/prepass/node.rs b/crates/bevy_core_pipeline/src/prepass/node.rs index 942983b063..04cc1890b0 100644 --- a/crates/bevy_core_pipeline/src/prepass/node.rs +++ b/crates/bevy_core_pipeline/src/prepass/node.rs @@ -66,6 +66,7 @@ impl ViewNode for LatePrepassNode { Option<&'static PreviousViewUniformOffset>, Has, Has, + Has, ); fn run<'w>( @@ -77,7 +78,7 @@ impl ViewNode for LatePrepassNode { ) -> Result<(), NodeRunError> { // We only need a late prepass if we have occlusion culling and indirect // drawing. - let (_, _, _, _, _, _, _, _, _, occlusion_culling, no_indirect_drawing) = query; + let (_, _, _, _, _, _, _, _, _, occlusion_culling, no_indirect_drawing, _) = query; if !occlusion_culling || no_indirect_drawing { return Ok(()); } @@ -110,10 +111,18 @@ fn run_prepass<'w>( view_prev_uniform_offset, _, _, + has_deferred, ): QueryItem<'w, ::ViewQuery>, world: &'w World, label: &'static str, ) -> Result<(), NodeRunError> { + // If we're using deferred rendering, there will be a deferred prepass + // instead of this one. Just bail out so we don't have to bother looking at + // the empty bins. + if has_deferred { + return Ok(()); + } + let (Some(opaque_prepass_phases), Some(alpha_mask_prepass_phases)) = ( world.get_resource::>(), world.get_resource::>(), diff --git a/crates/bevy_core_pipeline/src/skybox/mod.rs b/crates/bevy_core_pipeline/src/skybox/mod.rs index 6f7a3bfa94..7e2dba466c 100644 --- a/crates/bevy_core_pipeline/src/skybox/mod.rs +++ b/crates/bevy_core_pipeline/src/skybox/mod.rs @@ -3,12 +3,14 @@ use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::{ prelude::{Component, Entity}, query::{QueryItem, With}, + reflect::ReflectComponent, resource::Resource, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Commands, Query, Res, ResMut}, }; use bevy_image::{BevyDefault, Image}; use bevy_math::{Mat4, Quat}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ camera::Exposure, extract_component::{ @@ -46,7 +48,7 @@ impl Plugin for SkyboxPlugin { Shader::from_wgsl ); - app.add_plugins(( + app.register_type::().add_plugins(( ExtractComponentPlugin::::default(), UniformComponentPlugin::::default(), )); @@ -87,7 +89,8 @@ impl Plugin for SkyboxPlugin { /// To do so, use `EnvironmentMapLight` alongside this component. /// /// See also . -#[derive(Component, Clone)] +#[derive(Component, Clone, Reflect)] +#[reflect(Component, Default, Clone)] pub struct Skybox { pub image: Handle, /// Scale factor applied to the skybox image. 
diff --git a/crates/bevy_core_pipeline/src/tonemapping/mod.rs b/crates/bevy_core_pipeline/src/tonemapping/mod.rs index 832c2d3fd0..9f3964ad17 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/mod.rs +++ b/crates/bevy_core_pipeline/src/tonemapping/mod.rs @@ -449,8 +449,6 @@ fn setup_tonemapping_lut_image(bytes: &[u8], image_type: ImageType) -> Image { ..default() }); Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - "Tonemapping LUT sampler".to_string(), bytes, image_type, CompressedImageFormats::NONE, @@ -465,7 +463,7 @@ pub fn lut_placeholder() -> Image { let format = TextureFormat::Rgba8Unorm; let data = vec![255, 0, 255, 255]; Image { - data, + data: Some(data), texture_descriptor: TextureDescriptor { size: Extent3d { width: 1, diff --git a/crates/bevy_core_pipeline/src/upscaling/mod.rs b/crates/bevy_core_pipeline/src/upscaling/mod.rs index 89f3f8d09e..20dd19f4ce 100644 --- a/crates/bevy_core_pipeline/src/upscaling/mod.rs +++ b/crates/bevy_core_pipeline/src/upscaling/mod.rs @@ -1,7 +1,7 @@ use crate::blit::{BlitPipeline, BlitPipelineKey}; use bevy_app::prelude::*; use bevy_ecs::prelude::*; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bevy_render::{ camera::{CameraOutputMode, ExtractedCamera}, render_resource::*, diff --git a/crates/bevy_derive/Cargo.toml b/crates/bevy_derive/Cargo.toml index 3cac10ce09..1c4cb4adcc 100644 --- a/crates/bevy_derive/Cargo.toml +++ b/crates/bevy_derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_derive" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides derive implementations for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_derive/compile_fail/Cargo.toml b/crates/bevy_derive/compile_fail/Cargo.toml index 45dcf8aaaf..a9ad3e95e1 100644 --- a/crates/bevy_derive/compile_fail/Cargo.toml +++ b/crates/bevy_derive/compile_fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bevy_derive_compile_fail" -edition = "2021" +edition = "2024" description = "Compile fail tests for Bevy Engine's various macros" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr b/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr index 0315f2be74..46fec78c43 100644 --- a/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr +++ b/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr @@ -1,14 +1,11 @@ error[E0277]: the trait bound `TupleStruct: Deref` is not satisfied - --> tests/deref_mut_derive/missing_deref_fail.rs:10:8 - | -10 | struct TupleStruct(usize, #[deref] String); - | ^^^^^^^^^^^ the trait `Deref` is not implemented for `TupleStruct` - | + --> tests/deref_mut_derive/missing_deref_fail.rs:9:8 + | +9 | struct TupleStruct(usize, #[deref] String); + | ^^^^^^^^^^^ the trait `Deref` is not implemented for `TupleStruct` + | note: required by a bound in `DerefMut` - --> $RUSTUP_HOME/.rustup/toolchains/stable-x86_64-pc-windows-msvc/lib/rustlib/src/rust/library/core/src/ops/deref.rs:264:21 - | -264 | pub trait DerefMut: Deref { - | ^^^^^ required by this bound in `DerefMut` + --> /rustc/4d91de4e48198da2e33413efdcd9cd2cc0c46688/library/core/src/ops/deref.rs:290:1 error[E0277]: the trait bound `TupleStruct: Deref` is not satisfied --> tests/deref_mut_derive/missing_deref_fail.rs:7:10 @@ -19,21 
+16,18 @@ error[E0277]: the trait bound `TupleStruct: Deref` is not satisfied = note: this error originates in the derive macro `DerefMut` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Struct: Deref` is not satisfied - --> tests/deref_mut_derive/missing_deref_fail.rs:15:8 - | -15 | struct Struct { - | ^^^^^^ the trait `Deref` is not implemented for `Struct` - | + --> tests/deref_mut_derive/missing_deref_fail.rs:14:8 + | +14 | struct Struct { + | ^^^^^^ the trait `Deref` is not implemented for `Struct` + | note: required by a bound in `DerefMut` - --> $RUSTUP_HOME/.rustup/toolchains/stable-x86_64-pc-windows-msvc/lib/rustlib/src/rust/library/core/src/ops/deref.rs:264:21 - | -264 | pub trait DerefMut: Deref { - | ^^^^^ required by this bound in `DerefMut` + --> /rustc/4d91de4e48198da2e33413efdcd9cd2cc0c46688/library/core/src/ops/deref.rs:290:1 error[E0277]: the trait bound `Struct: Deref` is not satisfied - --> tests/deref_mut_derive/missing_deref_fail.rs:13:10 + --> tests/deref_mut_derive/missing_deref_fail.rs:12:10 | -13 | #[derive(DerefMut)] +12 | #[derive(DerefMut)] | ^^^^^^^^ the trait `Deref` is not implemented for `Struct` | = note: this error originates in the derive macro `DerefMut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/crates/bevy_derive/src/bevy_main.rs b/crates/bevy_derive/src/bevy_main.rs index 8111a31338..6481823ad4 100644 --- a/crates/bevy_derive/src/bevy_main.rs +++ b/crates/bevy_derive/src/bevy_main.rs @@ -10,19 +10,16 @@ pub fn bevy_main(_attr: TokenStream, item: TokenStream) -> TokenStream { ); TokenStream::from(quote! { - #[no_mangle] + // SAFETY: `#[bevy_main]` should only be placed on a single `main` function + // TODO: Potentially make `bevy_main` and unsafe attribute as there is a safety + // guarantee required from the caller. 
+ #[unsafe(no_mangle)] #[cfg(target_os = "android")] fn android_main(android_app: bevy::window::android_activity::AndroidApp) { let _ = bevy::window::ANDROID_APP.set(android_app); main(); } - #[no_mangle] - #[cfg(target_os = "ios")] - extern "C" fn main_rs() { - main(); - } - #[allow(unused)] #input }) diff --git a/crates/bevy_dev_tools/Cargo.toml b/crates/bevy_dev_tools/Cargo.toml index 0b9618d20d..ad0f2c515c 100644 --- a/crates/bevy_dev_tools/Cargo.toml +++ b/crates/bevy_dev_tools/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_dev_tools" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Collection of developer tools for the Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_dev_tools/src/fps_overlay.rs b/crates/bevy_dev_tools/src/fps_overlay.rs index 358a7f2045..7c29ae3adc 100644 --- a/crates/bevy_dev_tools/src/fps_overlay.rs +++ b/crates/bevy_dev_tools/src/fps_overlay.rs @@ -11,7 +11,7 @@ use bevy_ecs::{ prelude::Local, query::With, resource::Resource, - schedule::{common_conditions::resource_changed, IntoSystemConfigs}, + schedule::{common_conditions::resource_changed, IntoScheduleConfigs}, system::{Commands, Query, Res}, }; use bevy_render::view::Visibility; diff --git a/crates/bevy_dev_tools/src/lib.rs b/crates/bevy_dev_tools/src/lib.rs index 0f9dc75611..1dfd473409 100644 --- a/crates/bevy_dev_tools/src/lib.rs +++ b/crates/bevy_dev_tools/src/lib.rs @@ -29,7 +29,7 @@ pub mod states; /// To enable developer tools, you can either: /// /// - Create a custom crate feature (e.g "`dev_mode`"), which enables the `bevy_dev_tools` feature -/// along with any other development tools you might be using: +/// along with any other development tools you might be using: /// /// ```toml /// [feature] diff --git a/crates/bevy_dev_tools/src/picking_debug.rs b/crates/bevy_dev_tools/src/picking_debug.rs index 37defb5578..f72b70fc88 100644 --- a/crates/bevy_dev_tools/src/picking_debug.rs +++ b/crates/bevy_dev_tools/src/picking_debug.rs @@ -260,7 +260,7 @@ pub fn debug_draw( .map(|(entity, camera)| { ( entity, - camera.target.normalize(primary_window.get_single().ok()), + camera.target.normalize(primary_window.single().ok()), ) }) .filter_map(|(entity, target)| Some(entity).zip(target)) diff --git a/crates/bevy_diagnostic/Cargo.toml b/crates/bevy_diagnostic/Cargo.toml index a21e09eb9a..92e64b7e6c 100644 --- a/crates/bevy_diagnostic/Cargo.toml +++ b/crates/bevy_diagnostic/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_diagnostic" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides diagnostic functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -19,14 +19,14 @@ serialize = [ "bevy_ecs/serialize", "bevy_time/serialize", "bevy_utils/serde", - "bevy_platform_support/serialize", + "bevy_platform/serialize", ] ## Disables diagnostics that are unsupported when Bevy is dynamically linked dynamic_linking = [] ## Adds integration with `sysinfo`. 
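The `bevy_main` hunk above adopts the Rust 2024 form `#[unsafe(no_mangle)]`, matching the crate's edition bump: attributes that can undermine linking or ABI guarantees must now be wrapped in `unsafe(...)`. A minimal illustration with an invented exported function:

```rust
// Minimal illustration of the Rust 2024 syntax adopted above. The exported
// function is invented for the example.
#[unsafe(no_mangle)]
pub extern "C" fn example_entry_point() -> i32 {
    // The caller must guarantee the unmangled symbol does not collide with any
    // other symbol in the final binary; that obligation is what the
    // `unsafe(...)` wrapper records.
    0
}
```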
-sysinfo_plugin = ["sysinfo", "dep:bevy_tasks"] +sysinfo_plugin = ["sysinfo"] # Platform Compatibility @@ -37,10 +37,10 @@ std = [ "serde?/std", "bevy_ecs/std", "bevy_app/std", - "bevy_platform_support/std", + "bevy_platform/std", "bevy_time/std", "bevy_utils/std", - "bevy_tasks?/std", + "bevy_tasks/std", ] ## `critical-section` provides the building blocks for synchronization primitives @@ -48,21 +48,10 @@ std = [ critical-section = [ "bevy_ecs/critical-section", "bevy_app/critical-section", - "bevy_platform_support/critical-section", + "bevy_platform/critical-section", "bevy_time/critical-section", "bevy_utils/critical-section", - "bevy_tasks?/critical-section", -] - -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "bevy_ecs/portable-atomic", - "bevy_app/portable-atomic", - "bevy_platform_support/portable-atomic", - "bevy_time/portable-atomic", - "bevy_utils/portable-atomic", - "bevy_tasks?/portable-atomic", + "bevy_tasks/critical-section", ] [dependencies] @@ -73,8 +62,8 @@ bevy_time = { path = "../bevy_time", version = "0.16.0-dev", default-features = bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } -bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false, optional = true } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } @@ -88,14 +77,14 @@ log = { version = "0.4", default-features = false } # macOS [target.'cfg(all(target_os="macos"))'.dependencies] # Some features of sysinfo are not supported by apple. 
This will disable those features on apple devices -sysinfo = { version = "0.33.0", optional = true, default-features = false, features = [ +sysinfo = { version = "0.34.0", optional = true, default-features = false, features = [ "apple-app-store", "system", ] } # Only include when on linux/windows/android/freebsd [target.'cfg(any(target_os = "linux", target_os = "windows", target_os = "android", target_os = "freebsd"))'.dependencies] -sysinfo = { version = "0.33.0", optional = true, default-features = false, features = [ +sysinfo = { version = "0.34.0", optional = true, default-features = false, features = [ "system", ] } diff --git a/crates/bevy_diagnostic/src/diagnostic.rs b/crates/bevy_diagnostic/src/diagnostic.rs index af9a3e71e0..00a758416b 100644 --- a/crates/bevy_diagnostic/src/diagnostic.rs +++ b/crates/bevy_diagnostic/src/diagnostic.rs @@ -7,7 +7,7 @@ use core::{ use bevy_app::{App, SubApp}; use bevy_ecs::resource::Resource; use bevy_ecs::system::{Deferred, Res, SystemBuffer, SystemParam}; -use bevy_platform_support::{collections::HashMap, hash::PassHash, time::Instant}; +use bevy_platform::{collections::HashMap, hash::PassHash, time::Instant}; use const_fnv1a_hash::fnv1a_hash_str_64; use crate::DEFAULT_MAX_HISTORY_LENGTH; diff --git a/crates/bevy_diagnostic/src/log_diagnostics_plugin.rs b/crates/bevy_diagnostic/src/log_diagnostics_plugin.rs index 6a8c761c0b..1246b03f81 100644 --- a/crates/bevy_diagnostic/src/log_diagnostics_plugin.rs +++ b/crates/bevy_diagnostic/src/log_diagnostics_plugin.rs @@ -92,7 +92,7 @@ impl LogDiagnosticsPlugin { }; info!( - target: "bevy diagnostic", + target: "bevy_diagnostic", // Suffix is only used for 's' or 'ms' currently, // so we reserve two columns for it; however, // Do not reserve columns for the suffix in the average @@ -103,7 +103,7 @@ impl LogDiagnosticsPlugin { ); } else { info!( - target: "bevy diagnostic", + target: "bevy_diagnostic", "{path:.6}{suffix:}", path = diagnostic.path(), suffix = diagnostic.suffix, diff --git a/crates/bevy_diagnostic/src/system_information_diagnostics_plugin.rs b/crates/bevy_diagnostic/src/system_information_diagnostics_plugin.rs index 55616fca4b..376a109ae3 100644 --- a/crates/bevy_diagnostic/src/system_information_diagnostics_plugin.rs +++ b/crates/bevy_diagnostic/src/system_information_diagnostics_plugin.rs @@ -29,9 +29,13 @@ impl Plugin for SystemInformationDiagnosticsPlugin { impl SystemInformationDiagnosticsPlugin { /// Total system cpu usage in % - pub const CPU_USAGE: DiagnosticPath = DiagnosticPath::const_new("system/cpu_usage"); + pub const SYSTEM_CPU_USAGE: DiagnosticPath = DiagnosticPath::const_new("system/cpu_usage"); /// Total system memory usage in % - pub const MEM_USAGE: DiagnosticPath = DiagnosticPath::const_new("system/mem_usage"); + pub const SYSTEM_MEM_USAGE: DiagnosticPath = DiagnosticPath::const_new("system/mem_usage"); + /// Process cpu usage in % + pub const PROCESS_CPU_USAGE: DiagnosticPath = DiagnosticPath::const_new("process/cpu_usage"); + /// Process memory usage in % + pub const PROCESS_MEM_USAGE: DiagnosticPath = DiagnosticPath::const_new("process/mem_usage"); } /// A resource that stores diagnostic information about the system. 
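The hunks above split the old CPU_USAGE/MEM_USAGE paths into SYSTEM_CPU_USAGE/SYSTEM_MEM_USAGE and add per-process PROCESS_CPU_USAGE and PROCESS_MEM_USAGE diagnostics (the process memory value is registered with a GiB suffix further down). A minimal sketch of a consumer system reading them through the DiagnosticsStore, assuming the plugin is installed; the system name and log messages here are illustrative only:

use bevy_diagnostic::{DiagnosticsStore, SystemInformationDiagnosticsPlugin};
use bevy_ecs::system::Res;

fn log_process_usage(store: Res<DiagnosticsStore>) {
    // `smoothed()` returns the exponentially smoothed value once measurements exist.
    if let Some(cpu) = store
        .get(&SystemInformationDiagnosticsPlugin::PROCESS_CPU_USAGE)
        .and_then(|diagnostic| diagnostic.smoothed())
    {
        log::info!("process cpu: {cpu:.1}%");
    }
    if let Some(mem) = store
        .get(&SystemInformationDiagnosticsPlugin::PROCESS_MEM_USAGE)
        .and_then(|diagnostic| diagnostic.smoothed())
    {
        log::info!("process memory: {mem:.2} GiB");
    }
}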
@@ -70,7 +74,7 @@ pub mod internal { use bevy_app::{App, First, Startup, Update}; use bevy_ecs::resource::Resource; use bevy_ecs::{prelude::ResMut, system::Local}; - use bevy_platform_support::time::Instant; + use bevy_platform::time::Instant; use bevy_tasks::{available_parallelism, block_on, poll_once, AsyncComputeTaskPool, Task}; use log::info; use std::sync::Mutex; @@ -90,15 +94,26 @@ pub mod internal { } fn setup_system(mut diagnostics: ResMut) { - diagnostics - .add(Diagnostic::new(SystemInformationDiagnosticsPlugin::CPU_USAGE).with_suffix("%")); - diagnostics - .add(Diagnostic::new(SystemInformationDiagnosticsPlugin::MEM_USAGE).with_suffix("%")); + diagnostics.add( + Diagnostic::new(SystemInformationDiagnosticsPlugin::SYSTEM_CPU_USAGE).with_suffix("%"), + ); + diagnostics.add( + Diagnostic::new(SystemInformationDiagnosticsPlugin::SYSTEM_MEM_USAGE).with_suffix("%"), + ); + diagnostics.add( + Diagnostic::new(SystemInformationDiagnosticsPlugin::PROCESS_CPU_USAGE).with_suffix("%"), + ); + diagnostics.add( + Diagnostic::new(SystemInformationDiagnosticsPlugin::PROCESS_MEM_USAGE) + .with_suffix("GiB"), + ); } struct SysinfoRefreshData { - current_cpu_usage: f64, - current_used_mem: f64, + system_cpu_usage: f64, + system_mem_usage: f64, + process_cpu_usage: f64, + process_mem_usage: f64, } #[derive(Resource, Default)] @@ -135,18 +150,31 @@ pub mod internal { let sys = Arc::clone(sysinfo); let task = thread_pool.spawn(async move { let mut sys = sys.lock().unwrap(); + let pid = sysinfo::get_current_pid().expect("Failed to get current process ID"); + sys.refresh_processes(sysinfo::ProcessesToUpdate::Some(&[pid]), true); sys.refresh_cpu_specifics(CpuRefreshKind::nothing().with_cpu_usage()); sys.refresh_memory(); - let current_cpu_usage = sys.global_cpu_usage().into(); - // `memory()` fns return a value in bytes - let total_mem = sys.total_memory() as f64 / BYTES_TO_GIB; - let used_mem = sys.used_memory() as f64 / BYTES_TO_GIB; - let current_used_mem = used_mem / total_mem * 100.0; + let system_cpu_usage = sys.global_cpu_usage().into(); + let total_mem = sys.total_memory() as f64; + let used_mem = sys.used_memory() as f64; + let system_mem_usage = used_mem / total_mem * 100.0; + + let process_mem_usage = sys + .process(pid) + .map(|p| p.memory() as f64 * BYTES_TO_GIB) + .unwrap_or(0.0); + + let process_cpu_usage = sys + .process(pid) + .map(|p| p.cpu_usage() as f64 / sys.cpus().len() as f64) + .unwrap_or(0.0); SysinfoRefreshData { - current_cpu_usage, - current_used_mem, + system_cpu_usage, + system_mem_usage, + process_cpu_usage, + process_mem_usage, } }); tasks.tasks.push(task); @@ -160,12 +188,22 @@ pub mod internal { return true; }; - diagnostics.add_measurement(&SystemInformationDiagnosticsPlugin::CPU_USAGE, || { - data.current_cpu_usage - }); - diagnostics.add_measurement(&SystemInformationDiagnosticsPlugin::MEM_USAGE, || { - data.current_used_mem - }); + diagnostics.add_measurement( + &SystemInformationDiagnosticsPlugin::SYSTEM_CPU_USAGE, + || data.system_cpu_usage, + ); + diagnostics.add_measurement( + &SystemInformationDiagnosticsPlugin::SYSTEM_MEM_USAGE, + || data.system_mem_usage, + ); + diagnostics.add_measurement( + &SystemInformationDiagnosticsPlugin::PROCESS_CPU_USAGE, + || data.process_cpu_usage, + ); + diagnostics.add_measurement( + &SystemInformationDiagnosticsPlugin::PROCESS_MEM_USAGE, + || data.process_mem_usage, + ); false }); } @@ -186,8 +224,7 @@ pub mod internal { .first() .map(|cpu| cpu.brand().trim().to_string()) .unwrap_or_else(|| String::from("not available")), - 
core_count: sys - .physical_core_count() + core_count: System::physical_core_count() .map(|x| x.to_string()) .unwrap_or_else(|| String::from("not available")), // Convert from Bytes to GibiBytes since it's probably what people expect most of the time diff --git a/crates/bevy_dylib/Cargo.toml b/crates/bevy_dylib/Cargo.toml index de96856f92..26aec33b83 100644 --- a/crates/bevy_dylib/Cargo.toml +++ b/crates/bevy_dylib/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_dylib" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Force the Bevy Engine to be dynamically linked for faster linking" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_ecs/Cargo.toml b/crates/bevy_ecs/Cargo.toml index 0106286c52..97cdcee082 100644 --- a/crates/bevy_ecs/Cargo.toml +++ b/crates/bevy_ecs/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "bevy_ecs" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Bevy Engine's entity component system" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["ecs", "game", "bevy"] categories = ["game-engines", "data-structures"] -rust-version = "1.83.0" +rust-version = "1.85.0" [features] -default = ["std", "bevy_reflect", "async_executor"] +default = ["std", "bevy_reflect", "async_executor", "backtrace"] # Functionality @@ -23,7 +23,7 @@ multi_threaded = ["bevy_tasks/multi_threaded", "dep:arrayvec"] serialize = [ "dep:serde", "bevy_utils/serde", - "bevy_platform_support/serialize", + "bevy_platform/serialize", "indexmap/serde", ] @@ -33,9 +33,16 @@ bevy_reflect = ["dep:bevy_reflect"] ## Extends reflection support to functions. reflect_functions = ["bevy_reflect", "bevy_reflect/functions"] -## Use the configurable global error handler as the default error handler +## Use the configurable global error handler as the default error handler. +## +## This is typically used to turn panics from the ECS into loggable errors. +## This may be useful for production builds, +## but can result in a measurable performance impact, especially for commands. configurable_error_handler = [] +## Enables automatic backtrace capturing in BevyError +backtrace = ["std"] + # Debugging Features ## Enables `tracing` integration, allowing spans and other metrics to be reported @@ -57,11 +64,7 @@ track_location = [] ## Uses `async-executor` as a task execution backend. ## This backend is incompatible with `no_std` targets. -async_executor = ["dep:bevy_tasks", "std", "bevy_tasks/async_executor"] - -## Uses `edge-executor` as a task execution backend. -## Use this instead of `async-executor` if working on a `no_std` target. -edge_executor = ["dep:bevy_tasks", "bevy_tasks/edge_executor"] +async_executor = ["std", "bevy_tasks/async_executor"] # Platform Compatibility @@ -81,40 +84,32 @@ std = [ "nonmax/std", "arrayvec?/std", "log/std", - "bevy_platform_support/std", + "bevy_platform/std", ] ## `critical-section` provides the building blocks for synchronization primitives ## on all platforms, including `no_std`. critical-section = [ - "bevy_tasks?/critical-section", - "bevy_platform_support/critical-section", + "bevy_tasks/critical-section", + "bevy_platform/critical-section", "bevy_reflect?/critical-section", ] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. 
-portable-atomic = [ - "bevy_tasks?/portable-atomic", - "bevy_platform_support/portable-atomic", - "concurrent-queue/portable-atomic", - "bevy_reflect?/portable-atomic", -] - [dependencies] bevy_ptr = { path = "../bevy_ptr", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } -bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false, optional = true } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ + "smallvec", +], default-features = false, optional = true } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } bevy_ecs_macros = { path = "macros", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } bitflags = { version = "2.3", default-features = false } -concurrent-queue = { version = "2.5.0", default-features = false } disqualified = { version = "1.0", default-features = false } fixedbitset = { version = "0.5", default-features = false } serde = { version = "1", default-features = false, features = [ @@ -137,6 +132,12 @@ tracing = { version = "0.1", default-features = false, optional = true } log = { version = "0.4", default-features = false } bumpalo = "3" +concurrent-queue = { version = "2.5.0", default-features = false } +[target.'cfg(not(all(target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", target_has_atomic = "ptr")))'.dependencies] +concurrent-queue = { version = "2.5.0", default-features = false, features = [ + "portable-atomic", +] } + [dev-dependencies] rand = "0.8" static_assertions = "1.1.0" diff --git a/crates/bevy_ecs/compile_fail/Cargo.toml b/crates/bevy_ecs/compile_fail/Cargo.toml index 76f7ec8b8a..48e3857f53 100644 --- a/crates/bevy_ecs/compile_fail/Cargo.toml +++ b/crates/bevy_ecs/compile_fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bevy_ecs_compile_fail" -edition = "2021" +edition = "2024" description = "Compile fail tests for Bevy Engine's entity component system" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_ecs/compile_fail/tests/ui/component_hook_call_signature_mismatch.rs b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_call_signature_mismatch.rs new file mode 100644 index 0000000000..ef6b98cf09 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_call_signature_mismatch.rs @@ -0,0 +1,14 @@ +use bevy_ecs::prelude::*; + +// this should fail since the function is required to have the signature +// (DeferredWorld, HookContext) -> () +#[derive(Component)] +//~^ E0057 +#[component( + on_add = wrong_bazzing("foo"), +)] +pub struct FooWrongCall; + +fn wrong_bazzing(path: &str) -> impl Fn(bevy_ecs::world::DeferredWorld) { + |world| {} +} diff --git a/crates/bevy_ecs/compile_fail/tests/ui/component_hook_call_signature_mismatch.stderr b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_call_signature_mismatch.stderr new file mode 100644 index 0000000000..967cffe4ff --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_call_signature_mismatch.stderr @@ -0,0 +1,32 @@ +warning: unused variable: 
`path` + --> tests/ui/component_hook_call_signature_mismatch.rs:12:18 + | +12 | fn wrong_bazzing(path: &str) -> impl Fn(bevy_ecs::world::DeferredWorld) { + | ^^^^ help: if this is intentional, prefix it with an underscore: `_path` + | + = note: `#[warn(unused_variables)]` on by default + +warning: unused variable: `world` + --> tests/ui/component_hook_call_signature_mismatch.rs:13:6 + | +13 | |world| {} + | ^^^^^ help: if this is intentional, prefix it with an underscore: `_world` + +error[E0057]: this function takes 1 argument but 2 arguments were supplied + --> tests/ui/component_hook_call_signature_mismatch.rs:8:14 + | +5 | #[derive(Component)] + | --------- unexpected argument #2 of type `HookContext` +... +8 | on_add = wrong_bazzing("foo"), + | ^^^^^^^^^^^^^^^^^^^^ + | +note: opaque type defined here + --> tests/ui/component_hook_call_signature_mismatch.rs:12:33 + | +12 | fn wrong_bazzing(path: &str) -> impl Fn(bevy_ecs::world::DeferredWorld) { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: aborting due to 1 previous error; 2 warnings emitted + +For more information about this error, try `rustc --explain E0057`. diff --git a/crates/bevy_ecs/compile_fail/tests/ui/component_hook_relationship.rs b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_relationship.rs new file mode 100644 index 0000000000..4076819ee3 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_relationship.rs @@ -0,0 +1,63 @@ +use bevy_ecs::prelude::*; + +mod case1 { + use super::*; + + #[derive(Component, Debug)] + #[component(on_insert = foo_hook)] + //~^ ERROR: Custom on_insert hooks are not supported as relationships already define an on_insert hook + #[relationship(relationship_target = FooTargets)] + pub struct FooTargetOfFail(Entity); + + #[derive(Component, Debug)] + #[relationship_target(relationship = FooTargetOfFail)] + //~^ E0277 + pub struct FooTargets(Vec); +} + +mod case2 { + use super::*; + + #[derive(Component, Debug)] + #[component(on_replace = foo_hook)] + //~^ ERROR: Custom on_replace hooks are not supported as RelationshipTarget already defines an on_replace hook + #[relationship_target(relationship = FooTargetOf)] + pub struct FooTargetsFail(Vec); + + #[derive(Component, Debug)] + #[relationship(relationship_target = FooTargetsFail)] + //~^ E0277 + pub struct FooTargetOf(Entity); +} + +mod case3 { + use super::*; + + #[derive(Component, Debug)] + #[component(on_replace = foo_hook)] + //~^ ERROR: Custom on_replace hooks are not supported as Relationships already define an on_replace hook + #[relationship(relationship_target = BarTargets)] + pub struct BarTargetOfFail(Entity); + + #[derive(Component, Debug)] + #[relationship_target(relationship = BarTargetOfFail)] + //~^ E0277 + pub struct BarTargets(Vec); +} + +mod case4 { + use super::*; + + #[derive(Component, Debug)] + #[component(on_despawn = foo_hook)] + //~^ ERROR: Custom on_despawn hooks are not supported as this RelationshipTarget already defines an on_despawn hook, via the 'linked_spawn' attribute + #[relationship_target(relationship = BarTargetOf, linked_spawn)] + pub struct BarTargetsFail(Vec); + + #[derive(Component, Debug)] + #[relationship(relationship_target = BarTargetsFail)] + //~^ E0277 + pub struct BarTargetOf(Entity); +} + +fn foo_hook(_world: bevy_ecs::world::DeferredWorld, _ctx: bevy_ecs::component::HookContext) {} diff --git a/crates/bevy_ecs/compile_fail/tests/ui/component_hook_relationship.stderr b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_relationship.stderr new file mode 100644 
index 0000000000..01e4d57578 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_relationship.stderr @@ -0,0 +1,91 @@ +error: Custom on_insert hooks are not supported as relationships already define an on_insert hook + --> tests/ui/component_hook_relationship.rs:7:5 + | +7 | #[component(on_insert = foo_hook)] + | ^ + +error: Custom on_replace hooks are not supported as RelationshipTarget already defines an on_replace hook + --> tests/ui/component_hook_relationship.rs:22:5 + | +22 | #[component(on_replace = foo_hook)] + | ^ + +error: Custom on_replace hooks are not supported as Relationships already define an on_replace hook + --> tests/ui/component_hook_relationship.rs:37:5 + | +37 | #[component(on_replace = foo_hook)] + | ^ + +error: Custom on_despawn hooks are not supported as this RelationshipTarget already defines an on_despawn hook, via the 'linked_spawn' attribute + --> tests/ui/component_hook_relationship.rs:52:5 + | +52 | #[component(on_despawn = foo_hook)] + | ^ + +error[E0277]: the trait bound `FooTargetOfFail: Relationship` is not satisfied + --> tests/ui/component_hook_relationship.rs:13:42 + | +13 | #[relationship_target(relationship = FooTargetOfFail)] + | ^^^^^^^^^^^^^^^ the trait `Relationship` is not implemented for `FooTargetOfFail` + | + = help: the following other types implement trait `Relationship`: + BarTargetOf + ChildOf + FooTargetOf +note: required by a bound in `bevy_ecs::relationship::RelationshipTarget::Relationship` + --> $BEVY_ROOT/bevy_ecs/src/relationship/mod.rs:167:24 + | +167 | type Relationship: Relationship; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RelationshipTarget::Relationship` + +error[E0277]: the trait bound `FooTargetsFail: bevy_ecs::relationship::RelationshipTarget` is not satisfied + --> tests/ui/component_hook_relationship.rs:28:42 + | +28 | #[relationship(relationship_target = FooTargetsFail)] + | ^^^^^^^^^^^^^^ the trait `bevy_ecs::relationship::RelationshipTarget` is not implemented for `FooTargetsFail` + | + = help: the following other types implement trait `bevy_ecs::relationship::RelationshipTarget`: + BarTargets + Children + FooTargets +note: required by a bound in `bevy_ecs::relationship::Relationship::RelationshipTarget` + --> $BEVY_ROOT/bevy_ecs/src/relationship/mod.rs:79:30 + | +79 | type RelationshipTarget: RelationshipTarget; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `Relationship::RelationshipTarget` + +error[E0277]: the trait bound `BarTargetOfFail: Relationship` is not satisfied + --> tests/ui/component_hook_relationship.rs:43:42 + | +43 | #[relationship_target(relationship = BarTargetOfFail)] + | ^^^^^^^^^^^^^^^ the trait `Relationship` is not implemented for `BarTargetOfFail` + | + = help: the following other types implement trait `Relationship`: + BarTargetOf + ChildOf + FooTargetOf +note: required by a bound in `bevy_ecs::relationship::RelationshipTarget::Relationship` + --> $BEVY_ROOT/bevy_ecs/src/relationship/mod.rs:167:24 + | +167 | type Relationship: Relationship; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RelationshipTarget::Relationship` + +error[E0277]: the trait bound `BarTargetsFail: bevy_ecs::relationship::RelationshipTarget` is not satisfied + --> tests/ui/component_hook_relationship.rs:58:42 + | +58 | #[relationship(relationship_target = BarTargetsFail)] + | ^^^^^^^^^^^^^^ the trait `bevy_ecs::relationship::RelationshipTarget` is not implemented for `BarTargetsFail` + | + = help: the following other types 
implement trait `bevy_ecs::relationship::RelationshipTarget`: + BarTargets + Children + FooTargets +note: required by a bound in `bevy_ecs::relationship::Relationship::RelationshipTarget` + --> $BEVY_ROOT/bevy_ecs/src/relationship/mod.rs:79:30 + | +79 | type RelationshipTarget: RelationshipTarget; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `Relationship::RelationshipTarget` + +error: aborting due to 8 previous errors + +For more information about this error, try `rustc --explain E0277`. diff --git a/crates/bevy_ecs/compile_fail/tests/ui/component_hook_struct_path.rs b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_struct_path.rs new file mode 100644 index 0000000000..7670a26106 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_struct_path.rs @@ -0,0 +1,14 @@ +use bevy_ecs::prelude::*; + +// the proc macro allows general paths, which means normal structs are also passing the basic +// parsing. This test makes sure that we don't accidentally allow structs as hooks through future +// changes. +// +// Currently the error is thrown in the generated code and not while executing the proc macro +// logic. +#[derive(Component)] +#[component( + on_add = Bar, + //~^ E0425 +)] +pub struct FooWrongPath; diff --git a/crates/bevy_ecs/compile_fail/tests/ui/component_hook_struct_path.stderr b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_struct_path.stderr new file mode 100644 index 0000000000..4415582709 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/component_hook_struct_path.stderr @@ -0,0 +1,9 @@ +error[E0425]: cannot find value `Bar` in this scope + --> tests/ui/component_hook_struct_path.rs:11:14 + | +11 | on_add = Bar, + | ^^^ not found in this scope + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0425`. 
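The hook-related compile-fail tests added above pin down two rules: the value given to #[component(on_add = ...)] and its siblings must be a path or call that resolves to something callable as fn(DeferredWorld, HookContext), and custom on_insert/on_replace/on_despawn hooks are rejected on relationship components that already define those hooks. For contrast, a minimal sketch of a hook these rules accept, modeled on the foo_hook helper used in the tests; the Tracked component and on_add_hook names are illustrative only:

use bevy_ecs::component::HookContext;
use bevy_ecs::prelude::*;
use bevy_ecs::world::DeferredWorld;

// Matches the required signature: (DeferredWorld, HookContext) -> ().
fn on_add_hook(_world: DeferredWorld, _ctx: HookContext) {
    // Intentionally left empty, like `foo_hook` in the tests above.
}

#[derive(Component)]
#[component(on_add = on_add_hook)]
struct Tracked;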
diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_exact_sized_iterator_safety.stderr b/crates/bevy_ecs/compile_fail/tests/ui/query_exact_sized_iterator_safety.stderr index 2be0bb29e9..ba14761f58 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_exact_sized_iterator_safety.stderr +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_exact_sized_iterator_safety.stderr @@ -1,20 +1,22 @@ -error[E0277]: the trait bound `bevy_ecs::query::Changed: ArchetypeFilter` is not satisfied +error[E0277]: `bevy_ecs::query::Changed` is not a valid `Query` filter based on archetype information --> tests/ui/query_exact_sized_iterator_safety.rs:7:28 | 7 | is_exact_size_iterator(query.iter()); - | ---------------------- ^^^^^^^^^^^^ the trait `ArchetypeFilter` is not implemented for `bevy_ecs::query::Changed`, which is required by `QueryIter<'_, '_, &Foo, bevy_ecs::query::Changed>: ExactSizeIterator` + | ---------------------- ^^^^^^^^^^^^ invalid `Query` filter | | | required by a bound introduced by this call | + = help: the trait `ArchetypeFilter` is not implemented for `bevy_ecs::query::Changed` + = note: an `ArchetypeFilter` typically uses a combination of `With` and `Without` statements = help: the following other types implement trait `ArchetypeFilter`: - With - Without - Or<()> - Or<(F0,)> - Or<(F0, F1)> - Or<(F0, F1, F2)> - Or<(F0, F1, F2, F3)> - Or<(F0, F1, F2, F3, F4)> + () + (F,) + (F0, F1) + (F0, F1, F2) + (F0, F1, F2, F3) + (F0, F1, F2, F3, F4) + (F0, F1, F2, F3, F4, F5) + (F0, F1, F2, F3, F4, F5, F6) and 26 others = note: required for `QueryIter<'_, '_, &Foo, bevy_ecs::query::Changed>` to implement `ExactSizeIterator` note: required by a bound in `is_exact_size_iterator` @@ -23,23 +25,25 @@ note: required by a bound in `is_exact_size_iterator` 16 | fn is_exact_size_iterator(_iter: T) {} | ^^^^^^^^^^^^^^^^^ required by this bound in `is_exact_size_iterator` -error[E0277]: the trait bound `bevy_ecs::query::Added: ArchetypeFilter` is not satisfied +error[E0277]: `bevy_ecs::query::Added` is not a valid `Query` filter based on archetype information --> tests/ui/query_exact_sized_iterator_safety.rs:12:28 | 12 | is_exact_size_iterator(query.iter()); - | ---------------------- ^^^^^^^^^^^^ the trait `ArchetypeFilter` is not implemented for `bevy_ecs::query::Added`, which is required by `QueryIter<'_, '_, &Foo, bevy_ecs::query::Added>: ExactSizeIterator` + | ---------------------- ^^^^^^^^^^^^ invalid `Query` filter | | | required by a bound introduced by this call | + = help: the trait `ArchetypeFilter` is not implemented for `bevy_ecs::query::Added` + = note: an `ArchetypeFilter` typically uses a combination of `With` and `Without` statements = help: the following other types implement trait `ArchetypeFilter`: - With - Without - Or<()> - Or<(F0,)> - Or<(F0, F1)> - Or<(F0, F1, F2)> - Or<(F0, F1, F2, F3)> - Or<(F0, F1, F2, F3, F4)> + () + (F,) + (F0, F1) + (F0, F1, F2) + (F0, F1, F2, F3) + (F0, F1, F2, F3, F4) + (F0, F1, F2, F3, F4, F5) + (F0, F1, F2, F3, F4, F5, F6) and 26 others = note: required for `QueryIter<'_, '_, &Foo, bevy_ecs::query::Added>` to implement `ExactSizeIterator` note: required by a bound in `is_exact_size_iterator` diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_iter_combinations_mut_iterator_safety.stderr b/crates/bevy_ecs/compile_fail/tests/ui/query_iter_combinations_mut_iterator_safety.stderr index be1e31bd14..cb40b8f11c 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_iter_combinations_mut_iterator_safety.stderr +++ 
b/crates/bevy_ecs/compile_fail/tests/ui/query_iter_combinations_mut_iterator_safety.stderr @@ -2,20 +2,20 @@ error[E0277]: the trait bound `&mut A: ReadOnlyQueryData` is not satisfied --> tests/ui/query_iter_combinations_mut_iterator_safety.rs:9:17 | 9 | is_iterator(iter) - | ----------- ^^^^ the trait `ReadOnlyQueryData` is not implemented for `&mut A`, which is required by `QueryCombinationIter<'_, '_, &mut A, (), _>: Iterator` + | ----------- ^^^^ the trait `ReadOnlyQueryData` is not implemented for `&mut A` | | | required by a bound introduced by this call | = help: the following other types implement trait `ReadOnlyQueryData`: - bevy_ecs::change_detection::Ref<'__w, T> - Has - AnyOf<()> - AnyOf<(F0,)> - AnyOf<(F0, F1)> - AnyOf<(F0, F1, F2)> - AnyOf<(F0, F1, F2, F3)> - AnyOf<(F0, F1, F2, F3, F4)> - and 34 others + &Archetype + &T + () + (F,) + (F0, F1) + (F0, F1, F2) + (F0, F1, F2, F3) + (F0, F1, F2, F3, F4) + and 36 others = note: `ReadOnlyQueryData` is implemented for `&A`, but not for `&mut A` = note: required for `QueryCombinationIter<'_, '_, &mut A, (), _>` to implement `Iterator` note: required by a bound in `is_iterator` diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_iter_many_mut_iterator_safety.stderr b/crates/bevy_ecs/compile_fail/tests/ui/query_iter_many_mut_iterator_safety.stderr index 33f78e2850..24aafb1df4 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_iter_many_mut_iterator_safety.stderr +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_iter_many_mut_iterator_safety.stderr @@ -2,20 +2,20 @@ error[E0277]: the trait bound `&mut A: ReadOnlyQueryData` is not satisfied --> tests/ui/query_iter_many_mut_iterator_safety.rs:9:17 | 9 | is_iterator(iter) - | ----------- ^^^^ the trait `ReadOnlyQueryData` is not implemented for `&mut A`, which is required by `QueryManyIter<'_, '_, &mut A, (), std::array::IntoIter>: Iterator` + | ----------- ^^^^ the trait `ReadOnlyQueryData` is not implemented for `&mut A` | | | required by a bound introduced by this call | = help: the following other types implement trait `ReadOnlyQueryData`: - bevy_ecs::change_detection::Ref<'__w, T> - Has - AnyOf<()> - AnyOf<(F0,)> - AnyOf<(F0, F1)> - AnyOf<(F0, F1, F2)> - AnyOf<(F0, F1, F2, F3)> - AnyOf<(F0, F1, F2, F3, F4)> - and 34 others + &Archetype + &T + () + (F,) + (F0, F1) + (F0, F1, F2) + (F0, F1, F2, F3) + (F0, F1, F2, F3, F4) + and 36 others = note: `ReadOnlyQueryData` is implemented for `&A`, but not for `&mut A` = note: required for `QueryManyIter<'_, '_, &mut A, (), std::array::IntoIter>` to implement `Iterator` note: required by a bound in `is_iterator` diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_lens_lifetime_safety.rs b/crates/bevy_ecs/compile_fail/tests/ui/query_lens_lifetime_safety.rs new file mode 100644 index 0000000000..d3cdb06078 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_lens_lifetime_safety.rs @@ -0,0 +1,46 @@ +use bevy_ecs::prelude::*; +use bevy_ecs::system::{QueryLens, SystemState}; + +#[derive(Component, Eq, PartialEq, Debug)] +struct Foo(u32); + +#[derive(Component, Eq, PartialEq, Debug)] +struct Bar(u32); + +fn main() { + let mut world = World::default(); + let e = world.spawn((Foo(10_u32), Bar(10_u32))).id(); + + let mut system_state = SystemState::<(Query<&mut Foo>, Query<&mut Bar>)>::new(&mut world); + { + let (mut foo_query, mut bar_query) = system_state.get_mut(&mut world); + dbg!("hi"); + { + let mut lens = foo_query.as_query_lens(); + let mut data: Mut = lens.query().get_inner(e).unwrap(); + let mut data2: Mut = 
lens.query().get_inner(e).unwrap(); + //~^ E0499 + assert_eq!(&mut *data, &mut *data2); // oops UB + } + + { + let mut join: QueryLens<(&mut Foo, &mut Bar)> = foo_query.join(&mut bar_query); + let mut query = join.query(); + let (_, mut data) = query.single_mut().unwrap(); + let mut data2 = bar_query.single_mut().unwrap(); + //~^ E0499 + assert_eq!(&mut *data, &mut *data2); // oops UB + } + + { + let mut join: QueryLens<(&mut Foo, &mut Bar)> = + foo_query.join_inner(bar_query.reborrow()); + let mut query = join.query(); + let (_, mut data) = query.single_mut().unwrap(); + let mut data2 = bar_query.single_mut().unwrap(); + //~^ E0499 + assert_eq!(&mut *data, &mut *data2); // oops UB + } + dbg!("bye"); + } +} diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_lens_lifetime_safety.stderr b/crates/bevy_ecs/compile_fail/tests/ui/query_lens_lifetime_safety.stderr new file mode 100644 index 0000000000..70aa6b37f6 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_lens_lifetime_safety.stderr @@ -0,0 +1,38 @@ +error[E0499]: cannot borrow `lens` as mutable more than once at a time + --> tests/ui/query_lens_lifetime_safety.rs:21:39 + | +20 | let mut data: Mut = lens.query().get_inner(e).unwrap(); + | ---- first mutable borrow occurs here +21 | let mut data2: Mut = lens.query().get_inner(e).unwrap(); + | ^^^^ second mutable borrow occurs here +22 | +23 | assert_eq!(&mut *data, &mut *data2); // oops UB + | ---- first borrow later used here + +error[E0499]: cannot borrow `bar_query` as mutable more than once at a time + --> tests/ui/query_lens_lifetime_safety.rs:30:29 + | +27 | let mut join: QueryLens<(&mut Foo, &mut Bar)> = foo_query.join(&mut bar_query); + | -------------- first mutable borrow occurs here +... +30 | let mut data2 = bar_query.single_mut().unwrap(); + | ^^^^^^^^^ second mutable borrow occurs here +31 | +32 | assert_eq!(&mut *data, &mut *data2); // oops UB + | ---- first borrow later used here + +error[E0499]: cannot borrow `bar_query` as mutable more than once at a time + --> tests/ui/query_lens_lifetime_safety.rs:40:29 + | +37 | foo_query.join_inner(bar_query.reborrow()); + | --------- first mutable borrow occurs here +... +40 | let mut data2 = bar_query.single_mut().unwrap(); + | ^^^^^^^^^ second mutable borrow occurs here +41 | +42 | assert_eq!(&mut *data, &mut *data2); // oops UB + | ---- first borrow later used here + +error: aborting due to 3 previous errors + +For more information about this error, try `rustc --explain E0499`. 
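The updated call sites in these UI tests reflect that Query::single and Query::single_mut now return a Result (the old get_single/get_single_mut spellings are gone), so the borrow-safety scenarios all go through .unwrap(). Outside of such tests a call site would usually branch on the result instead; a small sketch using the same Foo component the tests define:

use bevy_ecs::prelude::*;

#[derive(Component)]
struct Foo(u32);

fn bump_single_foo(mut query: Query<&mut Foo>) {
    // Ok only when exactly one entity matches the query; Err otherwise.
    if let Ok(mut foo) = query.single_mut() {
        foo.0 += 1;
    }
}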
diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.rs b/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.rs index a8db25b223..62d76bec16 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.rs +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.rs @@ -27,29 +27,29 @@ fn main() { } { - let data: &Foo = query.single(); - let mut data2: Mut = query.single_mut(); + let data: &Foo = query.single().unwrap(); + let mut data2: Mut = query.single_mut().unwrap(); //~^ E0502 assert_eq!(data, &mut *data2); // oops UB } { - let mut data2: Mut = query.single_mut(); - let data: &Foo = query.single(); + let mut data2: Mut = query.single_mut().unwrap(); + let data: &Foo = query.single().unwrap(); //~^ E0502 assert_eq!(data, &mut *data2); // oops UB } { - let data: &Foo = query.get_single().unwrap(); - let mut data2: Mut = query.get_single_mut().unwrap(); + let data: &Foo = query.single().unwrap(); + let mut data2: Mut = query.single_mut().unwrap(); //~^ E0502 assert_eq!(data, &mut *data2); // oops UB } { - let mut data2: Mut = query.get_single_mut().unwrap(); - let data: &Foo = query.get_single().unwrap(); + let mut data2: Mut = query.single_mut().unwrap(); + let data: &Foo = query.single().unwrap(); //~^ E0502 assert_eq!(data, &mut *data2); // oops UB } diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.stderr b/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.stderr index c634ea8a70..c39840127a 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.stderr +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_lifetime_safety.stderr @@ -45,9 +45,9 @@ error[E0502]: cannot borrow `query` as immutable because it is also borrowed as error[E0502]: cannot borrow `query` as mutable because it is also borrowed as immutable --> tests/ui/query_lifetime_safety.rs:45:39 | -44 | let data: &Foo = query.get_single().unwrap(); +44 | let data: &Foo = query.single().unwrap(); | ----- immutable borrow occurs here -45 | let mut data2: Mut = query.get_single_mut().unwrap(); +45 | let mut data2: Mut = query.single_mut().unwrap(); | ^^^^^^^^^^^^^^^^^^^^^^ mutable borrow occurs here 46 | 47 | assert_eq!(data, &mut *data2); // oops UB @@ -56,9 +56,9 @@ error[E0502]: cannot borrow `query` as mutable because it is also borrowed as im error[E0502]: cannot borrow `query` as immutable because it is also borrowed as mutable --> tests/ui/query_lifetime_safety.rs:52:30 | -51 | let mut data2: Mut = query.get_single_mut().unwrap(); +51 | let mut data2: Mut = query.single_mut().unwrap(); | ----- mutable borrow occurs here -52 | let data: &Foo = query.get_single().unwrap(); +52 | let data: &Foo = query.single().unwrap(); | ^^^^^ immutable borrow occurs here 53 | 54 | assert_eq!(data, &mut *data2); // oops UB diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.rs b/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.rs index e8c66cff30..923f7894fd 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.rs +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.rs @@ -35,13 +35,13 @@ fn for_loops(mut query: Query<&mut Foo>) { fn single_mut_query(mut query: Query<&mut Foo>) { // this should fail to compile { - let mut mut_foo = query.single_mut(); + let mut mut_foo = query.single_mut().unwrap(); // This solves "temporary value dropped while borrowed" let readonly_query = query.as_readonly(); //~^ E0502 - let ref_foo = readonly_query.single(); + let ref_foo = readonly_query.single().unwrap(); 
*mut_foo = Foo; @@ -55,7 +55,7 @@ fn single_mut_query(mut query: Query<&mut Foo>) { let ref_foo = readonly_query.single(); - let mut mut_foo = query.single_mut(); + let mut mut_foo = query.single_mut().unwrap(); //~^ E0502 println!("{ref_foo:?}"); diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.stderr b/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.stderr index e5e469641d..30cbc3c142 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.stderr +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_to_readonly.stderr @@ -6,13 +6,13 @@ error[E0502]: cannot borrow `query` as immutable because it is also borrowed as | | | mutable borrow occurs here | mutable borrow later used here -9 | for _ in query.to_readonly().iter() {} +9 | for _ in query.as_readonly().iter() {} | ^^^^^ immutable borrow occurs here error[E0502]: cannot borrow `query` as mutable because it is also borrowed as immutable --> tests/ui/query_to_readonly.rs:15:18 | -14 | for _ in query.to_readonly().iter() { +14 | for _ in query.as_readonly().iter() { | -------------------------- | | | immutable borrow occurs here @@ -26,7 +26,7 @@ error[E0502]: cannot borrow `query` as immutable because it is also borrowed as 38 | let mut mut_foo = query.single_mut(); | ----- mutable borrow occurs here ... -41 | let readonly_query = query.to_readonly(); +41 | let readonly_query = query.as_readonly(); | ^^^^^ immutable borrow occurs here ... 46 | *mut_foo = Foo; @@ -35,7 +35,7 @@ error[E0502]: cannot borrow `query` as immutable because it is also borrowed as error[E0502]: cannot borrow `query` as mutable because it is also borrowed as immutable --> tests/ui/query_to_readonly.rs:58:27 | -54 | let readonly_query = query.to_readonly(); +54 | let readonly_query = query.as_readonly(); | ----- immutable borrow occurs here ... 
58 | let mut mut_foo = query.single_mut(); diff --git a/crates/bevy_ecs/compile_fail/tests/ui/query_transmute_safety.rs b/crates/bevy_ecs/compile_fail/tests/ui/query_transmute_safety.rs index 489c81d356..7518511fee 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/query_transmute_safety.rs +++ b/crates/bevy_ecs/compile_fail/tests/ui/query_transmute_safety.rs @@ -22,8 +22,8 @@ fn main() { let mut query_a = lens_a.query(); let mut query_b = lens_b.query(); - let a = query_a.single_mut(); - let b = query_b.single_mut(); // oops 2 mutable references to same Foo + let a = query_a.single_mut().unwrap(); + let b = query_b.single_mut().unwrap(); // oops 2 mutable references to same Foo assert_eq!(*a, *b); } @@ -34,8 +34,8 @@ fn main() { let mut query_b = lens.query(); //~^ E0499 - let a = query_a.single_mut(); - let b = query_b.single_mut(); // oops 2 mutable references to same Foo + let a = query_a.single_mut().unwrap(); + let b = query_b.single_mut().unwrap(); // oops 2 mutable references to same Foo assert_eq!(*a, *b); } } diff --git a/crates/bevy_ecs/compile_fail/tests/ui/system_param_derive_readonly.stderr b/crates/bevy_ecs/compile_fail/tests/ui/system_param_derive_readonly.stderr index 95057b2d64..2f767d960c 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/system_param_derive_readonly.stderr +++ b/crates/bevy_ecs/compile_fail/tests/ui/system_param_derive_readonly.stderr @@ -2,29 +2,29 @@ error[E0277]: the trait bound `&'static mut Foo: ReadOnlyQueryData` is not satis --> tests/ui/system_param_derive_readonly.rs:16:11 | 16 | state.get(&world); - | ^^^ the trait `ReadOnlyQueryData` is not implemented for `&'static mut Foo`, which is required by `Mutable<'_, '_>: ReadOnlySystemParam` + | ^^^ the trait `ReadOnlyQueryData` is not implemented for `&'static mut Foo` | = help: the following other types implement trait `ReadOnlyQueryData`: - bevy_ecs::change_detection::Ref<'__w, T> - Has - AnyOf<()> - AnyOf<(F0,)> - AnyOf<(F0, F1)> - AnyOf<(F0, F1, F2)> - AnyOf<(F0, F1, F2, F3)> - AnyOf<(F0, F1, F2, F3, F4)> - and 34 others + &Archetype + &T + () + (F,) + (F0, F1) + (F0, F1, F2) + (F0, F1, F2, F3) + (F0, F1, F2, F3, F4) + and 36 others = note: `ReadOnlyQueryData` is implemented for `&'static Foo`, but not for `&'static mut Foo` = note: required for `bevy_ecs::system::Query<'_, '_, &'static mut Foo>` to implement `ReadOnlySystemParam` = note: 1 redundant requirement hidden = note: required for `Mutable<'_, '_>` to implement `ReadOnlySystemParam` note: required by a bound in `SystemState::::get` - --> $BEVY_ROOT/crates/bevy_ecs/src/system/function_system.rs:215:16 + --> $BEVY_ROOT/bevy_ecs/src/system/function_system.rs:487:16 | -213 | pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param> +485 | pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param> | --- required by a bound in this associated function -214 | where -215 | Param: ReadOnlySystemParam, +486 | where +487 | Param: ReadOnlySystemParam, | ^^^^^^^^^^^^^^^^^^^ required by this bound in `SystemState::::get` error: aborting due to 1 previous error diff --git a/crates/bevy_ecs/compile_fail/tests/ui/system_query_iter_sort_lifetime_safety.rs b/crates/bevy_ecs/compile_fail/tests/ui/system_query_iter_sort_lifetime_safety.rs new file mode 100644 index 0000000000..118e732093 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/system_query_iter_sort_lifetime_safety.rs @@ -0,0 +1,19 @@ +use bevy_ecs::prelude::*; +use std::cmp::Ordering; + +#[derive(Component)] +struct A(usize); + +fn 
system(mut query: Query<&mut A>) { + let iter = query.iter_mut(); + let mut stored: Option<&A> = None; + let mut sorted = iter.sort_by::<&A>(|left, _right| { + // Try to smuggle the lens item out of the closure. + stored = Some(left); + //~^ E0521 + Ordering::Equal + }); + let r: &A = stored.unwrap(); + let m: &mut A = &mut sorted.next().unwrap(); + assert!(std::ptr::eq(r, m)); +} diff --git a/crates/bevy_ecs/compile_fail/tests/ui/system_query_iter_sort_lifetime_safety.stderr b/crates/bevy_ecs/compile_fail/tests/ui/system_query_iter_sort_lifetime_safety.stderr new file mode 100644 index 0000000000..4565ece326 --- /dev/null +++ b/crates/bevy_ecs/compile_fail/tests/ui/system_query_iter_sort_lifetime_safety.stderr @@ -0,0 +1,14 @@ +error[E0521]: borrowed data escapes outside of closure + --> tests/ui/system_query_iter_sort_lifetime_safety.rs:12:9 + | +9 | let mut stored: Option<&A> = None; + | ---------- `stored` declared here, outside of the closure body +10 | let mut sorted = iter.sort_by::<&A>(|left, _right| { + | ---- `left` is a reference that is only valid in the closure body +11 | // Try to smuggle the lens item out of the closure. +12 | stored = Some(left); + | ^^^^^^^^^^^^^^^^^^^ `left` escapes the closure body here + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0521`. diff --git a/crates/bevy_ecs/compile_fail/tests/ui/world_query_derive.stderr b/crates/bevy_ecs/compile_fail/tests/ui/world_query_derive.stderr index e712fb0bf1..4460ce08a0 100644 --- a/crates/bevy_ecs/compile_fail/tests/ui/world_query_derive.stderr +++ b/crates/bevy_ecs/compile_fail/tests/ui/world_query_derive.stderr @@ -1,3 +1,21 @@ +error: invalid attribute, expected `mutable` or `derive` + --> tests/ui/world_query_derive.rs:14:14 + | +14 | #[query_data(mut)] + | ^^^ + +error: `mutable` does not take any arguments + --> tests/ui/world_query_derive.rs:21:14 + | +21 | #[query_data(mutable(foo))] + | ^^^^^^^ + +error: `derive` requires at least one argument + --> tests/ui/world_query_derive.rs:28:14 + | +28 | #[query_data(derive)] + | ^^^^^^ + error[E0277]: the trait bound `&'static mut Foo: ReadOnlyQueryData` is not satisfied --> tests/ui/world_query_derive.rs:10:8 | @@ -5,15 +23,15 @@ error[E0277]: the trait bound `&'static mut Foo: ReadOnlyQueryData` is not satis | ^^^^^^^^^^^^^^^^ the trait `ReadOnlyQueryData` is not implemented for `&'static mut Foo` | = help: the following other types implement trait `ReadOnlyQueryData`: - MutableUnmarked - MutableMarkedReadOnly - NestedMutableUnmarked - bevy_ecs::change_detection::Ref<'__w, T> - Has - AnyOf<()> - AnyOf<(F0,)> - AnyOf<(F0, F1)> - and 37 others + &Archetype + &T + () + (F,) + (F0, F1) + (F0, F1, F2) + (F0, F1, F2, F3) + (F0, F1, F2, F3, F4) + and 39 others note: required by a bound in `_::assert_readonly` --> tests/ui/world_query_derive.rs:7:10 | @@ -22,28 +40,28 @@ note: required by a bound in `_::assert_readonly` = note: this error originates in the derive macro `QueryData` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `MutableMarked: ReadOnlyQueryData` is not satisfied - --> tests/ui/world_query_derive.rs:22:8 + --> tests/ui/world_query_derive.rs:43:8 | -22 | a: MutableMarked, +43 | a: MutableMarked, | ^^^^^^^^^^^^^ the trait `ReadOnlyQueryData` is not implemented for `MutableMarked` | = help: the following other types implement trait `ReadOnlyQueryData`: - MutableUnmarked - MutableMarkedReadOnly - NestedMutableUnmarked - 
bevy_ecs::change_detection::Ref<'__w, T> - Has - AnyOf<()> - AnyOf<(F0,)> - AnyOf<(F0, F1)> - and 37 others + &Archetype + &T + () + (F,) + (F0, F1) + (F0, F1, F2) + (F0, F1, F2, F3) + (F0, F1, F2, F3, F4) + and 39 others note: required by a bound in `_::assert_readonly` - --> tests/ui/world_query_derive.rs:19:10 + --> tests/ui/world_query_derive.rs:40:10 | -19 | #[derive(QueryData)] +40 | #[derive(QueryData)] | ^^^^^^^^^ required by this bound in `assert_readonly` = note: this error originates in the derive macro `QueryData` (in Nightly builds, run with -Z macro-backtrace for more info) -error: aborting due to 2 previous errors +error: aborting due to 5 previous errors For more information about this error, try `rustc --explain E0277`. diff --git a/crates/bevy_ecs/examples/change_detection.rs b/crates/bevy_ecs/examples/change_detection.rs index 23420b5e88..1b101b4033 100644 --- a/crates/bevy_ecs/examples/change_detection.rs +++ b/crates/bevy_ecs/examples/change_detection.rs @@ -8,6 +8,7 @@ #![expect( clippy::std_instead_of_core, + clippy::print_stdout, reason = "Examples should not follow this lint" )] diff --git a/crates/bevy_ecs/examples/events.rs b/crates/bevy_ecs/examples/events.rs index aac9dc38bc..70efa9b471 100644 --- a/crates/bevy_ecs/examples/events.rs +++ b/crates/bevy_ecs/examples/events.rs @@ -1,6 +1,8 @@ //! In this example a system sends a custom event with a 50/50 chance during any frame. //! If an event was send, it will be printed by the console in a receiving system. +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] + use bevy_ecs::{event::EventRegistry, prelude::*}; fn main() { @@ -45,7 +47,7 @@ struct MyEvent { fn sending_system(mut event_writer: EventWriter) { let random_value: f32 = rand::random(); if random_value > 0.5 { - event_writer.send(MyEvent { + event_writer.write(MyEvent { message: "A random event with value > 0.5".to_string(), random_value, }); diff --git a/crates/bevy_ecs/examples/resources.rs b/crates/bevy_ecs/examples/resources.rs index 43eddf7ce2..bb079d249d 100644 --- a/crates/bevy_ecs/examples/resources.rs +++ b/crates/bevy_ecs/examples/resources.rs @@ -3,6 +3,7 @@ #![expect( clippy::std_instead_of_core, + clippy::print_stdout, reason = "Examples should not follow this lint" )] diff --git a/crates/bevy_ecs/macros/Cargo.toml b/crates/bevy_ecs/macros/Cargo.toml index f1ea54894a..28605a5d67 100644 --- a/crates/bevy_ecs/macros/Cargo.toml +++ b/crates/bevy_ecs/macros/Cargo.toml @@ -2,7 +2,7 @@ name = "bevy_ecs_macros" version = "0.16.0-dev" description = "Bevy ECS Macros" -edition = "2021" +edition = "2024" license = "MIT OR Apache-2.0" [lib] @@ -11,7 +11,7 @@ proc-macro = true [dependencies] bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.16.0-dev" } -syn = { version = "2.0", features = ["full"] } +syn = { version = "2.0.99", features = ["full", "extra-traits"] } quote = "1.0" proc-macro2 = "1.0" [lints] diff --git a/crates/bevy_ecs/macros/src/component.rs b/crates/bevy_ecs/macros/src/component.rs index 76732c0b89..d3199c0909 100644 --- a/crates/bevy_ecs/macros/src/component.rs +++ b/crates/bevy_ecs/macros/src/component.rs @@ -1,20 +1,26 @@ -use proc_macro::{TokenStream, TokenTree}; +use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; use quote::{format_ident, quote, ToTokens}; use std::collections::HashSet; use syn::{ - parenthesized, + braced, parenthesized, parse::Parse, parse_macro_input, parse_quote, punctuated::Punctuated, spanned::Spanned, - token::{Comma, Paren}, - Data, 
DataStruct, DeriveInput, ExprClosure, ExprPath, Fields, Ident, Index, LitStr, Member, - Path, Result, Token, Visibility, + token::{Brace, Comma, Paren}, + Data, DataEnum, DataStruct, DeriveInput, Expr, ExprCall, ExprPath, Field, Fields, Ident, + LitStr, Member, Path, Result, Token, Type, Visibility, }; +pub const EVENT: &str = "event"; +pub const AUTO_PROPAGATE: &str = "auto_propagate"; +pub const TRAVERSAL: &str = "traversal"; + pub fn derive_event(input: TokenStream) -> TokenStream { let mut ast = parse_macro_input!(input as DeriveInput); + let mut auto_propagate = false; + let mut traversal: Type = parse_quote!(()); let bevy_ecs_path: Path = crate::bevy_ecs_path(); ast.generics @@ -22,13 +28,30 @@ pub fn derive_event(input: TokenStream) -> TokenStream { .predicates .push(parse_quote! { Self: Send + Sync + 'static }); + if let Some(attr) = ast.attrs.iter().find(|attr| attr.path().is_ident(EVENT)) { + if let Err(e) = attr.parse_nested_meta(|meta| match meta.path.get_ident() { + Some(ident) if ident == AUTO_PROPAGATE => { + auto_propagate = true; + Ok(()) + } + Some(ident) if ident == TRAVERSAL => { + traversal = meta.value()?.parse()?; + Ok(()) + } + Some(ident) => Err(meta.error(format!("unsupported attribute: {}", ident))), + None => Err(meta.error("expected identifier")), + }) { + return e.to_compile_error().into(); + } + } + let struct_name = &ast.ident; let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl(); TokenStream::from(quote! { impl #impl_generics #bevy_ecs_path::event::Event for #struct_name #type_generics #where_clause { - type Traversal = (); - const AUTO_PROPAGATE: bool = false; + type Traversal = #traversal; + const AUTO_PROPAGATE: bool = #auto_propagate; } }) } @@ -51,8 +74,6 @@ pub fn derive_resource(input: TokenStream) -> TokenStream { }) } -const ENTITIES_ATTR: &str = "entities"; - pub fn derive_component(input: TokenStream) -> TokenStream { let mut ast = parse_macro_input!(input as DeriveInput); let bevy_ecs_path: Path = crate::bevy_ecs_path(); @@ -71,12 +92,26 @@ pub fn derive_component(input: TokenStream) -> TokenStream { Err(err) => err.into_compile_error().into(), }; - let visit_entities = visit_entities(&ast.data, &bevy_ecs_path, relationship.is_some()); + let map_entities = map_entities( + &ast.data, + Ident::new("this", Span::call_site()), + relationship.is_some(), + relationship_target.is_some(), + ).map(|map_entities_impl| quote! 
{ + fn map_entities(this: &mut Self, mapper: &mut M) { + use #bevy_ecs_path::entity::MapEntities; + #map_entities_impl + } + }); let storage = storage_path(&bevy_ecs_path, attrs.storage); - let on_add_path = attrs.on_add.map(|path| path.to_token_stream()); - let on_remove_path = attrs.on_remove.map(|path| path.to_token_stream()); + let on_add_path = attrs + .on_add + .map(|path| path.to_token_stream(&bevy_ecs_path)); + let on_remove_path = attrs + .on_remove + .map(|path| path.to_token_stream(&bevy_ecs_path)); let on_insert_path = if relationship.is_some() { if attrs.on_insert.is_some() { @@ -90,7 +125,9 @@ pub fn derive_component(input: TokenStream) -> TokenStream { Some(quote!(::on_insert)) } else { - attrs.on_insert.map(|path| path.to_token_stream()) + attrs + .on_insert + .map(|path| path.to_token_stream(&bevy_ecs_path)) }; let on_replace_path = if relationship.is_some() { @@ -116,7 +153,9 @@ pub fn derive_component(input: TokenStream) -> TokenStream { Some(quote!(::on_replace)) } else { - attrs.on_replace.map(|path| path.to_token_stream()) + attrs + .on_replace + .map(|path| path.to_token_stream(&bevy_ecs_path)) }; let on_despawn_path = if attrs @@ -134,7 +173,9 @@ pub fn derive_component(input: TokenStream) -> TokenStream { Some(quote!(::on_despawn)) } else { - attrs.on_despawn.map(|path| path.to_token_stream()) + attrs + .on_despawn + .map(|path| path.to_token_stream(&bevy_ecs_path)) }; let on_add = hook_register_function_call(&bevy_ecs_path, quote! {on_add}, on_add_path); @@ -166,17 +207,7 @@ pub fn derive_component(input: TokenStream) -> TokenStream { ); }); match &require.func { - Some(RequireFunc::Path(func)) => { - register_required.push(quote! { - components.register_required_components_manual::( - required_components, - || { let x: #ident = #func().into(); x }, - inheritance_depth, - recursion_check_stack - ); - }); - } - Some(RequireFunc::Closure(func)) => { + Some(func) => { register_required.push(quote! { components.register_required_components_manual::( required_components, @@ -202,12 +233,24 @@ pub fn derive_component(input: TokenStream) -> TokenStream { let struct_name = &ast.ident; let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl(); + let required_component_docs = attrs.requires.map(|r| { + let paths = r + .iter() + .map(|r| format!("[`{}`]", r.path.to_token_stream())) + .collect::>() + .join(", "); + let doc = format!("**Required Components**: {paths}. \n\n A component's Required Components are inserted whenever it is inserted. Note that this will also insert the required components _of_ the required components, recursively, in depth-first order."); + quote! { + #[doc = #doc] + } + }); + let mutable_type = (attrs.immutable || relationship.is_some()) .then_some(quote! { #bevy_ecs_path::component::Immutable }) .unwrap_or(quote! 
{ #bevy_ecs_path::component::Mutable }); let clone_behavior = if relationship_target.is_some() { - quote!(#bevy_ecs_path::component::ComponentCloneBehavior::RelationshipTarget(#bevy_ecs_path::relationship::clone_relationship_target::)) + quote!(#bevy_ecs_path::component::ComponentCloneBehavior::Custom(#bevy_ecs_path::relationship::clone_relationship_target::)) } else { quote!( use #bevy_ecs_path::component::{DefaultCloneBehaviorBase, DefaultCloneBehaviorViaClone}; @@ -218,12 +261,13 @@ pub fn derive_component(input: TokenStream) -> TokenStream { // This puts `register_required` before `register_recursive_requires` to ensure that the constructors of _all_ top // level components are initialized first, giving them precedence over recursively defined constructors for the same component type TokenStream::from(quote! { + #required_component_docs impl #impl_generics #bevy_ecs_path::component::Component for #struct_name #type_generics #where_clause { const STORAGE_TYPE: #bevy_ecs_path::component::StorageType = #storage; type Mutability = #mutable_type; fn register_required_components( requiree: #bevy_ecs_path::component::ComponentId, - components: &mut #bevy_ecs_path::component::Components, + components: &mut #bevy_ecs_path::component::ComponentsRegistrator, required_components: &mut #bevy_ecs_path::component::RequiredComponents, inheritance_depth: u16, recursion_check_stack: &mut #bevy_ecs_path::__macro_exports::Vec<#bevy_ecs_path::component::ComponentId> @@ -246,7 +290,7 @@ pub fn derive_component(input: TokenStream) -> TokenStream { #clone_behavior } - #visit_entities + #map_entities } #relationship @@ -255,172 +299,90 @@ pub fn derive_component(input: TokenStream) -> TokenStream { }) } -fn visit_entities(data: &Data, bevy_ecs_path: &Path, is_relationship: bool) -> TokenStream2 { +const ENTITIES: &str = "entities"; + +pub(crate) fn map_entities( + data: &Data, + self_ident: Ident, + is_relationship: bool, + is_relationship_target: bool, +) -> Option { match data { - Data::Struct(DataStruct { ref fields, .. }) => { - let mut visited_fields = Vec::new(); - let mut visited_indices = Vec::new(); - match fields { - Fields::Named(fields) => { - for field in &fields.named { - if field - .attrs - .iter() - .any(|a| a.meta.path().is_ident(ENTITIES_ATTR)) - { - if let Some(ident) = field.ident.clone() { - visited_fields.push(ident); - } - } - } - } - Fields::Unnamed(fields) => { - for (index, field) in fields.unnamed.iter().enumerate() { - if index == 0 && is_relationship { - visited_indices.push(Index::from(0)); - } else if field - .attrs - .iter() - .any(|a| a.meta.path().is_ident(ENTITIES_ATTR)) - { - visited_indices.push(Index::from(index)); - } - } - } - Fields::Unit => {} - } + Data::Struct(DataStruct { fields, .. 
}) => { + let mut map = Vec::with_capacity(fields.len()); - if visited_fields.is_empty() && visited_indices.is_empty() { - TokenStream2::new() + let relationship = if is_relationship || is_relationship_target { + relationship_field(fields, "MapEntities", fields.span()).ok() } else { - let visit = visited_fields - .iter() - .map(|field| quote!(this.#field.visit_entities(&mut func);)) - .chain( - visited_indices - .iter() - .map(|index| quote!(this.#index.visit_entities(&mut func);)), - ); - let visit_mut = visited_fields - .iter() - .map(|field| quote!(this.#field.visit_entities_mut(&mut func);)) - .chain( - visited_indices - .iter() - .map(|index| quote!(this.#index.visit_entities_mut(&mut func);)), - ); - quote!( - fn visit_entities(this: &Self, mut func: impl FnMut(Entity)) { - use #bevy_ecs_path::entity::VisitEntities; - #(#visit)* - } + None + }; + fields + .iter() + .enumerate() + .filter(|(_, field)| { + field.attrs.iter().any(|a| a.path().is_ident(ENTITIES)) + || relationship.is_some_and(|relationship| relationship == *field) + }) + .for_each(|(index, field)| { + let field_member = field + .ident + .clone() + .map_or(Member::from(index), Member::Named); - fn visit_entities_mut(this: &mut Self, mut func: impl FnMut(&mut Entity)) { - use #bevy_ecs_path::entity::VisitEntitiesMut; - #(#visit_mut)* - } - ) - } + map.push(quote!(#self_ident.#field_member.map_entities(mapper);)); + }); + if map.is_empty() { + return None; + }; + Some(quote!( + #(#map)* + )) } - Data::Enum(data_enum) => { - let mut has_visited_fields = false; - let mut visit_variants = Vec::with_capacity(data_enum.variants.len()); - let mut visit_variants_mut = Vec::with_capacity(data_enum.variants.len()); - for variant in &data_enum.variants { - let mut variant_fields = Vec::new(); - let mut variant_fields_mut = Vec::new(); + Data::Enum(DataEnum { variants, .. 
}) => { + let mut map = Vec::with_capacity(variants.len()); - let mut visit_variant_fields = Vec::new(); - let mut visit_variant_fields_mut = Vec::new(); - - for (index, field) in variant.fields.iter().enumerate() { - if field - .attrs - .iter() - .any(|a| a.meta.path().is_ident(ENTITIES_ATTR)) - { - has_visited_fields = true; - let field_member = ident_or_index(field.ident.as_ref(), index); - let field_ident = format_ident!("field_{}", field_member); - - variant_fields.push(quote!(#field_member: ref #field_ident)); - variant_fields_mut.push(quote!(#field_member: ref mut #field_ident)); - - visit_variant_fields.push(quote!(#field_ident.visit_entities(&mut func);)); - visit_variant_fields_mut - .push(quote!(#field_ident.visit_entities_mut(&mut func);)); - } - } + for variant in variants.iter() { + let field_members = variant + .fields + .iter() + .enumerate() + .filter(|(_, field)| field.attrs.iter().any(|a| a.path().is_ident(ENTITIES))) + .map(|(index, field)| { + field + .ident + .clone() + .map_or(Member::from(index), Member::Named) + }) + .collect::>(); let ident = &variant.ident; - visit_variants.push(quote!(Self::#ident {#(#variant_fields,)* ..} => { - #(#visit_variant_fields)* - })); - visit_variants_mut.push(quote!(Self::#ident {#(#variant_fields_mut,)* ..} => { - #(#visit_variant_fields_mut)* - })); + let field_idents = field_members + .iter() + .map(|member| format_ident!("__self_{}", member)) + .collect::>(); + + map.push( + quote!(Self::#ident {#(#field_members: #field_idents,)* ..} => { + #(#field_idents.map_entities(mapper);)* + }), + ); } - if has_visited_fields { - quote!( - fn visit_entities(this: &Self, mut func: impl FnMut(Entity)) { - use #bevy_ecs_path::entity::VisitEntities; - match this { - #(#visit_variants,)* - _ => {} - } - } - fn visit_entities_mut(this: &mut Self, mut func: impl FnMut(&mut Entity)) { - use #bevy_ecs_path::entity::VisitEntitiesMut; - match this { - #(#visit_variants_mut,)* - _ => {} - } - } - ) - } else { - TokenStream2::new() - } + if map.is_empty() { + return None; + }; + + Some(quote!( + match #self_ident { + #(#map,)* + _ => {} + } + )) } - Data::Union(_) => TokenStream2::new(), + Data::Union(_) => None, } } -pub(crate) fn ident_or_index(ident: Option<&Ident>, index: usize) -> Member { - ident.map_or_else( - || Member::Unnamed(index.into()), - |ident| Member::Named(ident.clone()), - ) -} - -pub fn document_required_components(attr: TokenStream, item: TokenStream) -> TokenStream { - let paths = parse_macro_input!(attr with Punctuated::::parse_terminated) - .iter() - .map(|r| format!("[`{}`]", r.path.to_token_stream())) - .collect::>() - .join(", "); - - let bevy_ecs_path = crate::bevy_ecs_path() - .to_token_stream() - .to_string() - .replace(' ', ""); - let required_components_path = bevy_ecs_path + "::component::Component#required-components"; - - // Insert information about required components after any existing doc comments - let mut out = TokenStream::new(); - let mut end_of_attributes_reached = false; - for tt in item { - if !end_of_attributes_reached & matches!(tt, TokenTree::Ident(_)) { - end_of_attributes_reached = true; - let doc: TokenStream = format!("#[doc = \"\n\n# Required Components\n{paths} \n\n A component's [required components]({required_components_path}) are inserted whenever it is inserted. 
Note that this will also insert the required components _of_ the required components, recursively, in depth-first order.\"]").parse().unwrap(); - out.extend(doc); - } - out.extend(Some(tt)); - } - - out -} - pub const COMPONENT: &str = "component"; pub const STORAGE: &str = "storage"; pub const REQUIRE: &str = "require"; @@ -435,14 +397,64 @@ pub const ON_DESPAWN: &str = "on_despawn"; pub const IMMUTABLE: &str = "immutable"; +/// All allowed attribute value expression kinds for component hooks +#[derive(Debug)] +enum HookAttributeKind { + /// expressions like function or struct names + /// + /// structs will throw compile errors on the code generation so this is safe + Path(ExprPath), + /// function call like expressions + Call(ExprCall), +} + +impl HookAttributeKind { + fn from_expr(value: Expr) -> Result { + match value { + Expr::Path(path) => Ok(HookAttributeKind::Path(path)), + Expr::Call(call) => Ok(HookAttributeKind::Call(call)), + // throw meaningful error on all other expressions + _ => Err(syn::Error::new( + value.span(), + [ + "Not supported in this position, please use one of the following:", + "- path to function", + "- call to function yielding closure", + ] + .join("\n"), + )), + } + } + + fn to_token_stream(&self, bevy_ecs_path: &Path) -> TokenStream2 { + match self { + HookAttributeKind::Path(path) => path.to_token_stream(), + HookAttributeKind::Call(call) => { + quote!({ + fn _internal_hook(world: #bevy_ecs_path::world::DeferredWorld, ctx: #bevy_ecs_path::component::HookContext) { + (#call)(world, ctx) + } + _internal_hook + }) + } + } + } +} + +impl Parse for HookAttributeKind { + fn parse(input: syn::parse::ParseStream) -> Result { + input.parse::().and_then(Self::from_expr) + } +} + struct Attrs { storage: StorageTy, requires: Option>, - on_add: Option, - on_insert: Option, - on_replace: Option, - on_remove: Option, - on_despawn: Option, + on_add: Option, + on_insert: Option, + on_replace: Option, + on_remove: Option, + on_despawn: Option, relationship: Option, relationship_target: Option, immutable: bool, @@ -456,20 +468,15 @@ enum StorageTy { struct Require { path: Path, - func: Option, -} - -enum RequireFunc { - Path(Path), - Closure(ExprClosure), + func: Option, } struct Relationship { - relationship_target: Ident, + relationship_target: Type, } struct RelationshipTarget { - relationship: Ident, + relationship: Type, linked_spawn: bool, } @@ -507,19 +514,19 @@ fn parse_component_attr(ast: &DeriveInput) -> Result { }; Ok(()) } else if nested.path.is_ident(ON_ADD) { - attrs.on_add = Some(nested.value()?.parse::()?); + attrs.on_add = Some(nested.value()?.parse::()?); Ok(()) } else if nested.path.is_ident(ON_INSERT) { - attrs.on_insert = Some(nested.value()?.parse::()?); + attrs.on_insert = Some(nested.value()?.parse::()?); Ok(()) } else if nested.path.is_ident(ON_REPLACE) { - attrs.on_replace = Some(nested.value()?.parse::()?); + attrs.on_replace = Some(nested.value()?.parse::()?); Ok(()) } else if nested.path.is_ident(ON_REMOVE) { - attrs.on_remove = Some(nested.value()?.parse::()?); + attrs.on_remove = Some(nested.value()?.parse::()?); Ok(()) } else if nested.path.is_ident(ON_DESPAWN) { - attrs.on_despawn = Some(nested.value()?.parse::()?); + attrs.on_despawn = Some(nested.value()?.parse::()?); Ok(()) } else if nested.path.is_ident(IMMUTABLE) { attrs.immutable = true; @@ -558,19 +565,64 @@ fn parse_component_attr(ast: &DeriveInput) -> Result { impl Parse for Require { fn parse(input: syn::parse::ParseStream) -> Result { - let path = input.parse::()?; - let func = if 
input.peek(Paren) { + let mut path = input.parse::()?; + let mut last_segment_is_lower = false; + let mut is_constructor_call = false; + + // Use the case of the type name to check if it's an enum + // This doesn't match everything that can be an enum according to the rust spec + // but it matches what clippy is OK with + let is_enum = { + let mut first_chars = path + .segments + .iter() + .rev() + .filter_map(|s| s.ident.to_string().chars().next()); + if let Some(last) = first_chars.next() { + if last.is_uppercase() { + if let Some(last) = first_chars.next() { + last.is_uppercase() + } else { + false + } + } else { + last_segment_is_lower = true; + false + } + } else { + false + } + }; + + let func = if input.peek(Token![=]) { + // If there is an '=', then this is a "function style" require + input.parse::()?; + let expr: Expr = input.parse()?; + Some(quote!(|| #expr )) + } else if input.peek(Brace) { + // This is a "value style" named-struct-like require + let content; + braced!(content in input); + let content = content.parse::()?; + Some(quote!(|| #path { #content })) + } else if input.peek(Paren) { + // This is a "value style" tuple-struct-like require let content; parenthesized!(content in input); - if let Ok(func) = content.parse::() { - Some(RequireFunc::Closure(func)) - } else { - let func = content.parse::()?; - Some(RequireFunc::Path(func)) - } + let content = content.parse::()?; + is_constructor_call = last_segment_is_lower; + Some(quote!(|| #path (#content))) + } else if is_enum { + // if this is an enum, then it is an inline enum component declaration + Some(quote!(|| #path)) } else { + // if this isn't any of the above, then it is a component ident, which will use Default None }; + if is_enum || is_constructor_call { + path.segments.pop(); + path.segments.pop_punct(); + } Ok(Require { path, func }) } } @@ -598,47 +650,48 @@ fn hook_register_function_call( }) } +mod kw { + syn::custom_keyword!(relationship_target); + syn::custom_keyword!(relationship); + syn::custom_keyword!(linked_spawn); +} + impl Parse for Relationship { fn parse(input: syn::parse::ParseStream) -> Result { - syn::custom_keyword!(relationship_target); - input.parse::()?; + input.parse::()?; input.parse::()?; Ok(Relationship { - relationship_target: input.parse::()?, + relationship_target: input.parse::()?, }) } } impl Parse for RelationshipTarget { fn parse(input: syn::parse::ParseStream) -> Result { - let mut relationship_ident = None; - let mut linked_spawn_exists = false; - syn::custom_keyword!(relationship); - syn::custom_keyword!(linked_spawn); - let mut done = false; - loop { - if input.peek(relationship) { - input.parse::()?; + let mut relationship: Option = None; + let mut linked_spawn: bool = false; + + while !input.is_empty() { + let lookahead = input.lookahead1(); + if lookahead.peek(kw::linked_spawn) { + input.parse::()?; + linked_spawn = true; + } else if lookahead.peek(kw::relationship) { + input.parse::()?; input.parse::()?; - relationship_ident = Some(input.parse::()?); - } else if input.peek(linked_spawn) { - input.parse::()?; - linked_spawn_exists = true; + relationship = Some(input.parse()?); } else { - done = true; + return Err(lookahead.error()); } - if input.peek(Token![,]) { + if !input.is_empty() { input.parse::()?; } - if done { - break; - } } - - let relationship = relationship_ident.ok_or_else(|| syn::Error::new(input.span(), "RelationshipTarget derive must specify a relationship via #[relationship_target(relationship = X)"))?; Ok(RelationshipTarget { - relationship, - 
linked_spawn: linked_spawn_exists, + relationship: relationship.ok_or_else(|| { + syn::Error::new(input.span(), "Missing `relationship = X` attribute") + })?, + linked_spawn, }) } } @@ -651,25 +704,23 @@ fn derive_relationship( let Some(relationship) = &attrs.relationship else { return Ok(None); }; - const RELATIONSHIP_FORMAT_MESSAGE: &str = "Relationship derives must be a tuple struct with the only element being an EntityTargets type (ex: ChildOf(Entity))"; - if let Data::Struct(DataStruct { - fields: Fields::Unnamed(unnamed_fields), + let Data::Struct(DataStruct { + fields, struct_token, .. }) = &ast.data - { - if unnamed_fields.unnamed.len() != 1 { - return Err(syn::Error::new(ast.span(), RELATIONSHIP_FORMAT_MESSAGE)); - } - if unnamed_fields.unnamed.first().is_none() { - return Err(syn::Error::new( - struct_token.span(), - RELATIONSHIP_FORMAT_MESSAGE, - )); - } - } else { - return Err(syn::Error::new(ast.span(), RELATIONSHIP_FORMAT_MESSAGE)); + else { + return Err(syn::Error::new( + ast.span(), + "Relationship can only be derived for structs.", + )); }; + let field = relationship_field(fields, "Relationship", struct_token.span())?; + + let relationship_member = field.ident.clone().map_or(Member::from(0), Member::Named); + let members = fields + .members() + .filter(|member| member != &relationship_member); let struct_name = &ast.ident; let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl(); @@ -682,12 +733,15 @@ fn derive_relationship( #[inline(always)] fn get(&self) -> #bevy_ecs_path::entity::Entity { - self.0 + self.#relationship_member } #[inline] fn from(entity: #bevy_ecs_path::entity::Entity) -> Self { - Self(entity) + Self { + #(#members: core::default::Default::default(),)* + #relationship_member: entity + } } } })) @@ -702,30 +756,28 @@ fn derive_relationship_target( return Ok(None); }; - const RELATIONSHIP_TARGET_FORMAT_MESSAGE: &str = "RelationshipTarget derives must be a tuple struct with the first element being a private RelationshipSourceCollection (ex: Children(Vec))"; - let collection = if let Data::Struct(DataStruct { - fields: Fields::Unnamed(unnamed_fields), + let Data::Struct(DataStruct { + fields, struct_token, .. 
}) = &ast.data - { - if let Some(first) = unnamed_fields.unnamed.first() { - if first.vis != Visibility::Inherited { - return Err(syn::Error::new(first.span(), "The collection in RelationshipTarget must be private to prevent users from directly mutating it, which could invalidate the correctness of relationships.")); - } - first.ty.clone() - } else { - return Err(syn::Error::new( - struct_token.span(), - RELATIONSHIP_TARGET_FORMAT_MESSAGE, - )); - } - } else { + else { return Err(syn::Error::new( ast.span(), - RELATIONSHIP_TARGET_FORMAT_MESSAGE, + "RelationshipTarget can only be derived for structs.", )); }; + let field = relationship_field(fields, "RelationshipTarget", struct_token.span())?; + + if field.vis != Visibility::Inherited { + return Err(syn::Error::new(field.span(), "The collection in RelationshipTarget must be private to prevent users from directly mutating it, which could invalidate the correctness of relationships.")); + } + let collection = &field.ty; + let relationship_member = field.ident.clone().map_or(Member::from(0), Member::Named); + + let members = fields + .members() + .filter(|member| member != &relationship_member); let relationship = &relationship_target.relationship; let struct_name = &ast.ident; @@ -739,18 +791,56 @@ fn derive_relationship_target( #[inline] fn collection(&self) -> &Self::Collection { - &self.0 + &self.#relationship_member } #[inline] fn collection_mut_risky(&mut self) -> &mut Self::Collection { - &mut self.0 + &mut self.#relationship_member } #[inline] fn from_collection_risky(collection: Self::Collection) -> Self { - Self(collection) + Self { + #(#members: core::default::Default::default(),)* + #relationship_member: collection + } } } })) } + +/// Returns the field with the `#[relationship]` attribute, the only field if unnamed, +/// or the only field in a [`Fields::Named`] with one field, otherwise `Err`. 
+fn relationship_field<'a>( + fields: &'a Fields, + derive: &'static str, + span: Span, +) -> Result<&'a Field> { + match fields { + Fields::Named(fields) if fields.named.len() == 1 => Ok(fields.named.first().unwrap()), + Fields::Named(fields) => fields.named.iter().find(|field| { + field + .attrs + .iter() + .any(|attr| attr.path().is_ident(RELATIONSHIP)) + }).ok_or(syn::Error::new( + span, + format!("{derive} derive expected named structs with a single field or with a field annotated with #[relationship].") + )), + Fields::Unnamed(fields) => fields + .unnamed + .len() + .eq(&1) + .then(|| fields.unnamed.first()) + .flatten() + .ok_or(syn::Error::new( + span, + format!("{derive} derive expected unnamed structs with one field."), + )), + Fields::Unit => Err(syn::Error::new( + span, + format!("{derive} derive expected named or unnamed struct, found unit struct."), + )), + } +} diff --git a/crates/bevy_ecs/macros/src/lib.rs b/crates/bevy_ecs/macros/src/lib.rs index e97149ec04..a657765ac2 100644 --- a/crates/bevy_ecs/macros/src/lib.rs +++ b/crates/bevy_ecs/macros/src/lib.rs @@ -9,10 +9,13 @@ mod query_filter; mod states; mod world_query; -use crate::{query_data::derive_query_data_impl, query_filter::derive_query_filter_impl}; +use crate::{ + component::map_entities, query_data::derive_query_data_impl, + query_filter::derive_query_filter_impl, +}; use bevy_macro_utils::{derive_label, ensure_no_collision, get_struct_fields, BevyManifest}; use proc_macro::TokenStream; -use proc_macro2::{Span, TokenStream as TokenStream2}; +use proc_macro2::{Ident, Span}; use quote::{format_ident, quote}; use syn::{ parse_macro_input, parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, @@ -131,9 +134,10 @@ pub fn derive_bundle(input: TokenStream) -> TokenStream { // - ComponentId is returned in field-definition-order. [get_components] uses field-definition-order // - `Bundle::get_components` is exactly once for each member. Rely's on the Component -> Bundle implementation to properly pass // the correct `StorageType` into the callback. + #[allow(deprecated)] unsafe impl #impl_generics #ecs_path::bundle::Bundle for #struct_name #ty_generics #where_clause { fn component_ids( - components: &mut #ecs_path::component::Components, + components: &mut #ecs_path::component::ComponentsRegistrator, ids: &mut impl FnMut(#ecs_path::component::ComponentId) ){ #(#field_component_ids)* @@ -147,7 +151,7 @@ pub fn derive_bundle(input: TokenStream) -> TokenStream { } fn register_required_components( - components: &mut #ecs_path::component::Components, + components: &mut #ecs_path::component::ComponentsRegistrator, required_components: &mut #ecs_path::component::RequiredComponents ){ #(#field_required_components)* @@ -156,6 +160,7 @@ pub fn derive_bundle(input: TokenStream) -> TokenStream { // SAFETY: // - ComponentId is returned in field-definition-order. 
[from_components] uses field-definition-order + #[allow(deprecated)] unsafe impl #impl_generics #ecs_path::bundle::BundleFromComponents for #struct_name #ty_generics #where_clause { #[allow(unused_variables, non_snake_case)] unsafe fn from_components<__T, __F>(ctx: &mut __T, func: &mut __F) -> Self @@ -168,6 +173,7 @@ pub fn derive_bundle(input: TokenStream) -> TokenStream { } } + #[allow(deprecated)] impl #impl_generics #ecs_path::bundle::DynamicBundle for #struct_name #ty_generics #where_clause { type Effect = (); #[allow(unused_variables)] @@ -182,105 +188,22 @@ pub fn derive_bundle(input: TokenStream) -> TokenStream { }) } -fn derive_visit_entities_base( - input: TokenStream, - trait_name: TokenStream2, - gen_methods: impl FnOnce(Vec) -> TokenStream2, -) -> TokenStream { +#[proc_macro_derive(MapEntities, attributes(entities))] +pub fn derive_map_entities(input: TokenStream) -> TokenStream { let ast = parse_macro_input!(input as DeriveInput); let ecs_path = bevy_ecs_path(); - - let named_fields = match get_struct_fields(&ast.data) { - Ok(fields) => fields, - Err(e) => return e.into_compile_error().into(), - }; - - let field = named_fields - .iter() - .filter_map(|field| { - if let Some(attr) = field - .attrs - .iter() - .find(|a| a.path().is_ident("visit_entities")) - { - let ignore = attr.parse_nested_meta(|meta| { - if meta.path.is_ident("ignore") { - Ok(()) - } else { - Err(meta.error("Invalid visit_entities attribute. Use `ignore`")) - } - }); - return match ignore { - Ok(()) => None, - Err(e) => Some(Err(e)), - }; - } - Some(Ok(field)) - }) - .map(|res| res.map(|field| field.ident.as_ref())) - .collect::, _>>(); - - let field = match field { - Ok(field) => field, - Err(e) => return e.into_compile_error().into(), - }; - - if field.is_empty() { - return syn::Error::new( - ast.span(), - format!("Invalid `{}` type: at least one field", trait_name), - ) - .into_compile_error() - .into(); - } - - let field_access = field - .iter() - .enumerate() - .map(|(n, f)| { - if let Some(ident) = f { - quote! { - self.#ident - } - } else { - let idx = Index::from(n); - quote! { - self.#idx - } - } - }) - .collect::>(); - - let methods = gen_methods(field_access); - - let generics = ast.generics; - let (impl_generics, ty_generics, _) = generics.split_for_impl(); + let map_entities_impl = map_entities( + &ast.data, + Ident::new("self", Span::call_site()), + false, + false, + ); let struct_name = &ast.ident; - + let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl(); TokenStream::from(quote! { - impl #impl_generics #ecs_path::entity:: #trait_name for #struct_name #ty_generics { - #methods - } - }) -} - -#[proc_macro_derive(VisitEntitiesMut, attributes(visit_entities))] -pub fn derive_visit_entities_mut(input: TokenStream) -> TokenStream { - derive_visit_entities_base(input, quote! { VisitEntitiesMut }, |field| { - quote! { - fn visit_entities_mut(&mut self, mut f: F) { - #(#field.visit_entities_mut(&mut f);)* - } - } - }) -} - -#[proc_macro_derive(VisitEntities, attributes(visit_entities))] -pub fn derive_visit_entities(input: TokenStream) -> TokenStream { - derive_visit_entities_base(input, quote! { VisitEntities }, |field| { - quote! 
{ - fn visit_entities(&self, mut f: F) { - #(#field.visit_entities(&mut f);)* + impl #impl_generics #ecs_path::entity::MapEntities for #struct_name #type_generics #where_clause { + fn map_entities(&mut self, mapper: &mut M) { + #map_entities_impl } } }) @@ -306,19 +229,39 @@ pub fn derive_system_param(input: TokenStream) -> TokenStream { let path = bevy_ecs_path(); let mut field_locals = Vec::new(); + let mut field_names = Vec::new(); let mut fields = Vec::new(); let mut field_types = Vec::new(); + let mut field_messages = Vec::new(); for (i, field) in field_definitions.iter().enumerate() { field_locals.push(format_ident!("f{i}")); let i = Index::from(i); - fields.push( - field - .ident - .as_ref() - .map(|f| quote! { #f }) - .unwrap_or_else(|| quote! { #i }), - ); + let field_value = field + .ident + .as_ref() + .map(|f| quote! { #f }) + .unwrap_or_else(|| quote! { #i }); + field_names.push(format!("::{}", field_value)); + fields.push(field_value); field_types.push(&field.ty); + let mut field_message = None; + for meta in field + .attrs + .iter() + .filter(|a| a.path().is_ident("system_param")) + { + if let Err(e) = meta.parse_nested_meta(|nested| { + if nested.path.is_ident("validation_message") { + field_message = Some(nested.value()?.parse()?); + Ok(()) + } else { + Err(nested.error("Unsupported attribute")) + } + }) { + return e.into_compile_error().into(); + } + } + field_messages.push(field_message.unwrap_or_else(|| quote! { err.message })); } let generics = ast.generics; @@ -504,10 +447,15 @@ pub fn derive_system_param(input: TokenStream) -> TokenStream { #[inline] unsafe fn validate_param<'w, 's>( state: &'s Self::State, - system_meta: &#path::system::SystemMeta, - world: #path::world::unsafe_world_cell::UnsafeWorldCell<'w>, - ) -> bool { - <(#(#tuple_types,)*) as #path::system::SystemParam>::validate_param(&state.state, system_meta, world) + _system_meta: &#path::system::SystemMeta, + _world: #path::world::unsafe_world_cell::UnsafeWorldCell<'w>, + ) -> Result<(), #path::system::SystemParamValidationError> { + let #state_struct_name { state: (#(#tuple_patterns,)*) } = state; + #( + <#field_types as #path::system::SystemParam>::validate_param(#field_locals, _system_meta, _world) + .map_err(|err| #path::system::SystemParamValidationError::new::(err.skipped, #field_messages, #field_names))?; + )* + Ok(()) } #[inline] @@ -582,7 +530,7 @@ pub(crate) fn bevy_ecs_path() -> syn::Path { BevyManifest::shared().get_path("bevy_ecs") } -#[proc_macro_derive(Event)] +#[proc_macro_derive(Event, attributes(event))] pub fn derive_event(input: TokenStream) -> TokenStream { component::derive_event(input) } @@ -594,20 +542,12 @@ pub fn derive_resource(input: TokenStream) -> TokenStream { #[proc_macro_derive( Component, - attributes(component, relationship, relationship_target, entities) + attributes(component, require, relationship, relationship_target, entities) )] pub fn derive_component(input: TokenStream) -> TokenStream { component::derive_component(input) } -/// Allows specifying a component's required components. -/// -/// See `Component` docs for usage. 
-#[proc_macro_attribute] -pub fn require(attr: TokenStream, item: TokenStream) -> TokenStream { - component::document_required_components(attr, item) -} - #[proc_macro_derive(States)] pub fn derive_states(input: TokenStream) -> TokenStream { states::derive_states(input) diff --git a/crates/bevy_ecs/macros/src/query_data.rs b/crates/bevy_ecs/macros/src/query_data.rs index ffac58ef1d..d919d0b05e 100644 --- a/crates/bevy_ecs/macros/src/query_data.rs +++ b/crates/bevy_ecs/macros/src/query_data.rs @@ -254,6 +254,7 @@ pub fn derive_query_data_impl(input: TokenStream) -> TokenStream { /// SAFETY: we assert fields are readonly below unsafe impl #user_impl_generics #path::query::QueryData for #read_only_struct_name #user_ty_generics #user_where_clauses { + const IS_READ_ONLY: bool = true; type ReadOnly = #read_only_struct_name #user_ty_generics; type Item<'__w> = #read_only_item_struct_name #user_ty_generics_with_world; @@ -284,10 +285,13 @@ pub fn derive_query_data_impl(input: TokenStream) -> TokenStream { quote! {} }; + let is_read_only = !attributes.is_mutable; + quote! { /// SAFETY: we assert fields are readonly below unsafe impl #user_impl_generics #path::query::QueryData for #struct_name #user_ty_generics #user_where_clauses { + const IS_READ_ONLY: bool = #is_read_only; type ReadOnly = #read_only_struct_name #user_ty_generics; type Item<'__w> = #item_struct_name #user_ty_generics_with_world; diff --git a/crates/bevy_ecs/src/archetype.rs b/crates/bevy_ecs/src/archetype.rs index e6ae5e4ae3..2468c5e9a8 100644 --- a/crates/bevy_ecs/src/archetype.rs +++ b/crates/bevy_ecs/src/archetype.rs @@ -27,7 +27,7 @@ use crate::{ storage::{ImmutableSparseSet, SparseArray, SparseSet, SparseSetIndex, TableId, TableRow}, }; use alloc::{boxed::Box, vec::Vec}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::{ hash::Hash, ops::{Index, IndexMut, RangeFrom}, @@ -942,7 +942,7 @@ impl Archetypes { let archetypes = &mut self.archetypes; let archetype_component_count = &mut self.archetype_component_count; let component_index = &mut self.by_component; - let archetype_id = *self + *self .by_components .entry(archetype_identity) .or_insert_with_key(move |identity| { @@ -975,8 +975,7 @@ impl Archetypes { .zip(sparse_set_archetype_components), )); id - }); - archetype_id + }) } /// Returns the number of components that are stored in archetypes. 
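The macro changes above reshape the user-facing derive surface in three ways: `require` moves from a standalone attribute macro to a helper attribute on `#[derive(Component)]` with value-style forms, component hooks accept either a function path or a call expression that yields a hook, and `Relationship`/`RelationshipTarget` are derived through the same Component derive. Below is a minimal usage sketch against a bevy_ecs build that includes these changes; all type names, hook bodies, and the relationship pair (`Team`, `Allegiance`, `Player`, `Following`, `FollowedBy`, `on_add_player`, `log_hook`) are illustrative and not taken from the diff:

```rust
use bevy_ecs::component::HookContext;
use bevy_ecs::prelude::*;
use bevy_ecs::world::DeferredWorld;

#[derive(Component)]
struct Team(u32);

#[derive(Component, Default)]
enum Allegiance {
    #[default]
    Neutral,
    Hostile,
}

// Hook referenced by path below.
fn on_add_player(_world: DeferredWorld, _ctx: HookContext) {}

// Hook factory: per `HookAttributeKind::Call`, a call expression is wrapped in a
// generated function, so it must yield something callable as (DeferredWorld, HookContext).
fn log_hook(tag: &'static str) -> impl Fn(DeferredWorld, HookContext) {
    move |_world, _ctx| println!("hook fired: {tag}")
}

#[derive(Component)]
// Value-style requires handled by `Require::parse` above:
// `Team(7)` is a tuple-struct value, `Allegiance::Hostile` an inline enum variant.
#[require(Team(7), Allegiance::Hostile)]
#[component(on_add = on_add_player, on_remove = log_hook("player removed"))]
struct Player;

// Relationships are now plain derive helper attributes; single-field structs work directly.
#[derive(Component)]
#[relationship(relationship_target = FollowedBy)]
struct Following(Entity);

#[derive(Component)]
#[relationship_target(relationship = Following, linked_spawn)]
struct FollowedBy(Vec<Entity>);
```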
diff --git a/crates/bevy_ecs/src/bundle.rs b/crates/bevy_ecs/src/bundle.rs index 285e42d4e6..5666d90c53 100644 --- a/crates/bevy_ecs/src/bundle.rs +++ b/crates/bevy_ecs/src/bundle.rs @@ -9,29 +9,29 @@ use crate::{ Archetype, ArchetypeAfterBundleInsert, ArchetypeId, Archetypes, BundleComponentStatus, ComponentStatus, SpawnBundleStatus, }, + change_detection::MaybeLocation, component::{ - Component, ComponentId, Components, RequiredComponentConstructor, RequiredComponents, - StorageType, Tick, + Component, ComponentId, Components, ComponentsRegistrator, RequiredComponentConstructor, + RequiredComponents, StorageType, Tick, }, entity::{Entities, Entity, EntityLocation}, observer::Observers, prelude::World, query::DebugCheckedUnwrap, + relationship::RelationshipHookMode, storage::{SparseSetIndex, SparseSets, Storages, Table, TableRow}, world::{unsafe_world_cell::UnsafeWorldCell, EntityWorldMut, ON_ADD, ON_INSERT, ON_REPLACE}, }; use alloc::{boxed::Box, vec, vec::Vec}; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_ptr::{ConstNonNull, OwningPtr}; use bevy_utils::TypeIdMap; -#[cfg(feature = "track_location")] -use core::panic::Location; use core::{any::TypeId, ptr::NonNull}; use variadics_please::all_tuples; /// The `Bundle` trait enables insertion and removal of [`Component`]s from an entity. /// -/// Implementors of the `Bundle` trait are called 'bundles'. +/// Implementers of the `Bundle` trait are called 'bundles'. /// /// Each bundle represents a static set of [`Component`] types. /// Currently, bundles can only contain one of each [`Component`], and will @@ -72,7 +72,7 @@ use variadics_please::all_tuples; /// That is, if the entity does not have all the components of the bundle, those /// which are present will be removed. /// -/// # Implementors +/// # Implementers /// /// Every type which implements [`Component`] also implements `Bundle`, since /// [`Component`] types can be added to or removed from an entity. @@ -151,14 +151,14 @@ use variadics_please::all_tuples; pub unsafe trait Bundle: DynamicBundle + Send + Sync + 'static { /// Gets this [`Bundle`]'s component ids, in the order of this bundle's [`Component`]s #[doc(hidden)] - fn component_ids(components: &mut Components, ids: &mut impl FnMut(ComponentId)); + fn component_ids(components: &mut ComponentsRegistrator, ids: &mut impl FnMut(ComponentId)); /// Gets this [`Bundle`]'s component ids. This will be [`None`] if the component has not been registered. fn get_component_ids(components: &Components, ids: &mut impl FnMut(Option)); /// Registers components that are required by the components in this [`Bundle`]. fn register_required_components( - _components: &mut Components, + _components: &mut ComponentsRegistrator, _required_components: &mut RequiredComponents, ); } @@ -223,12 +223,12 @@ pub trait BundleEffect { // - `Bundle::component_ids` calls `ids` for C's component id (and nothing else) // - `Bundle::get_components` is called exactly once for C and passes the component's storage type based on its associated constant. 
unsafe impl Bundle for C { - fn component_ids(components: &mut Components, ids: &mut impl FnMut(ComponentId)) { + fn component_ids(components: &mut ComponentsRegistrator, ids: &mut impl FnMut(ComponentId)) { ids(components.register_component::()); } fn register_required_components( - components: &mut Components, + components: &mut ComponentsRegistrator, required_components: &mut RequiredComponents, ) { let component_id = components.register_component::(); @@ -288,7 +288,7 @@ macro_rules! tuple_impl { // - `Bundle::get_components` is called exactly once for each member. Relies on the above implementation to pass the correct // `StorageType` into the callback. unsafe impl<$($name: Bundle),*> Bundle for ($($name,)*) { - fn component_ids(components: &mut Components, ids: &mut impl FnMut(ComponentId)){ + fn component_ids(components: &mut ComponentsRegistrator, ids: &mut impl FnMut(ComponentId)){ $(<$name as Bundle>::component_ids(components, ids);)* } @@ -297,7 +297,7 @@ macro_rules! tuple_impl { } fn register_required_components( - components: &mut Components, + components: &mut ComponentsRegistrator, required_components: &mut RequiredComponents, ) { $(<$name as Bundle>::register_required_components(components, required_components);)* @@ -623,7 +623,7 @@ impl BundleInfo { change_tick: Tick, bundle: T, insert_mode: InsertMode, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) -> T::Effect { // NOTE: get_components calls this closure on each component in "bundle order". // bundle_info.component_ids are also in "bundle order" @@ -638,20 +638,12 @@ impl BundleInfo { // the target table contains the component. let column = table.get_column_mut(component_id).debug_checked_unwrap(); match (status, insert_mode) { - (ComponentStatus::Added, _) => column.initialize( - table_row, - component_ptr, - change_tick, - #[cfg(feature = "track_location")] - caller, - ), - (ComponentStatus::Existing, InsertMode::Replace) => column.replace( - table_row, - component_ptr, - change_tick, - #[cfg(feature = "track_location")] - caller, - ), + (ComponentStatus::Added, _) => { + column.initialize(table_row, component_ptr, change_tick, caller); + } + (ComponentStatus::Existing, InsertMode::Replace) => { + column.replace(table_row, component_ptr, change_tick, caller); + } (ComponentStatus::Existing, InsertMode::Keep) => { if let Some(drop_fn) = table.get_drop_for(component_id) { drop_fn(component_ptr); @@ -664,13 +656,7 @@ impl BundleInfo { // SAFETY: If component_id is in self.component_ids, BundleInfo::new ensures that // a sparse set exists for the component. unsafe { sparse_sets.get_mut(component_id).debug_checked_unwrap() }; - sparse_set.insert( - entity, - component_ptr, - change_tick, - #[cfg(feature = "track_location")] - caller, - ); + sparse_set.insert(entity, component_ptr, change_tick, caller); } } bundle_component += 1; @@ -683,7 +669,6 @@ impl BundleInfo { change_tick, table_row, entity, - #[cfg(feature = "track_location")] caller, ); } @@ -712,7 +697,7 @@ impl BundleInfo { component_id: ComponentId, storage_type: StorageType, component_ptr: OwningPtr, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { { match storage_type { @@ -721,26 +706,14 @@ impl BundleInfo { // SAFETY: If component_id is in required_components, BundleInfo::new requires that // the target table contains the component. 
unsafe { table.get_column_mut(component_id).debug_checked_unwrap() }; - column.initialize( - table_row, - component_ptr, - change_tick, - #[cfg(feature = "track_location")] - caller, - ); + column.initialize(table_row, component_ptr, change_tick, caller); } StorageType::SparseSet => { let sparse_set = // SAFETY: If component_id is in required_components, BundleInfo::new requires that // a sparse set exists for the component. unsafe { sparse_sets.get_mut(component_id).debug_checked_unwrap() }; - sparse_set.insert( - entity, - component_ptr, - change_tick, - #[cfg(feature = "track_location")] - caller, - ); + sparse_set.insert(entity, component_ptr, change_tick, caller); } } } @@ -1026,9 +999,12 @@ impl<'w> BundleInserter<'w> { archetype_id: ArchetypeId, change_tick: Tick, ) -> Self { + // SAFETY: These come from the same world. `world.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut world.components, &mut world.component_ids) }; let bundle_id = world .bundles - .register_info::(&mut world.components, &mut world.storages); + .register_info::(&mut registrator, &mut world.storages); // SAFETY: We just ensured this bundle exists unsafe { Self::new_with_id(world, archetype_id, bundle_id, change_tick) } } @@ -1127,7 +1103,8 @@ impl<'w> BundleInserter<'w> { location: EntityLocation, bundle: T, insert_mode: InsertMode, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, + relationship_hook_mode: RelationshipHookMode, ) -> (EntityLocation, T::Effect) { let bundle_info = self.bundle_info.as_ref(); let archetype_after_insert = self.archetype_after_insert.as_ref(); @@ -1145,7 +1122,6 @@ impl<'w> BundleInserter<'w> { ON_REPLACE, entity, archetype_after_insert.iter_existing(), - #[cfg(feature = "track_location")] caller, ); } @@ -1153,8 +1129,8 @@ impl<'w> BundleInserter<'w> { archetype, entity, archetype_after_insert.iter_existing(), - #[cfg(feature = "track_location")] caller, + relationship_hook_mode, ); } } @@ -1183,7 +1159,6 @@ impl<'w> BundleInserter<'w> { self.change_tick, bundle, insert_mode, - #[cfg(feature = "track_location")] caller, ); @@ -1225,7 +1200,6 @@ impl<'w> BundleInserter<'w> { self.change_tick, bundle, insert_mode, - #[cfg(feature = "track_location")] caller, ); @@ -1308,7 +1282,6 @@ impl<'w> BundleInserter<'w> { self.change_tick, bundle, insert_mode, - #[cfg(feature = "track_location")] caller, ); @@ -1327,7 +1300,6 @@ impl<'w> BundleInserter<'w> { new_archetype, entity, archetype_after_insert.iter_added(), - #[cfg(feature = "track_location")] caller, ); if new_archetype.has_add_observer() { @@ -1335,7 +1307,6 @@ impl<'w> BundleInserter<'w> { ON_ADD, entity, archetype_after_insert.iter_added(), - #[cfg(feature = "track_location")] caller, ); } @@ -1346,15 +1317,14 @@ impl<'w> BundleInserter<'w> { new_archetype, entity, archetype_after_insert.iter_inserted(), - #[cfg(feature = "track_location")] caller, + relationship_hook_mode, ); if new_archetype.has_insert_observer() { deferred_world.trigger_observers( ON_INSERT, entity, archetype_after_insert.iter_inserted(), - #[cfg(feature = "track_location")] caller, ); } @@ -1366,15 +1336,14 @@ impl<'w> BundleInserter<'w> { new_archetype, entity, archetype_after_insert.iter_added(), - #[cfg(feature = "track_location")] caller, + relationship_hook_mode, ); if new_archetype.has_insert_observer() { deferred_world.trigger_observers( ON_INSERT, entity, archetype_after_insert.iter_added(), - #[cfg(feature = 
"track_location")] caller, ); } @@ -1404,9 +1373,12 @@ pub(crate) struct BundleSpawner<'w> { impl<'w> BundleSpawner<'w> { #[inline] pub fn new(world: &'w mut World, change_tick: Tick) -> Self { + // SAFETY: These come from the same world. `world.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut world.components, &mut world.component_ids) }; let bundle_id = world .bundles - .register_info::(&mut world.components, &mut world.storages); + .register_info::(&mut registrator, &mut world.storages); // SAFETY: we initialized this bundle_id in `init_info` unsafe { Self::new_with_id(world, bundle_id, change_tick) } } @@ -1456,7 +1428,7 @@ impl<'w> BundleSpawner<'w> { &mut self, entity: Entity, bundle: T, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) -> (EntityLocation, T::Effect) { // SAFETY: We do not make any structural changes to the archetype graph through self.world so these pointers always remain valid let bundle_info = self.bundle_info.as_ref(); @@ -1481,7 +1453,6 @@ impl<'w> BundleSpawner<'w> { self.change_tick, bundle, InsertMode::Replace, - #[cfg(feature = "track_location")] caller, ); entities.set(entity.index(), location); @@ -1499,7 +1470,6 @@ impl<'w> BundleSpawner<'w> { archetype, entity, bundle_info.iter_contributed_components(), - #[cfg(feature = "track_location")] caller, ); if archetype.has_add_observer() { @@ -1507,7 +1477,6 @@ impl<'w> BundleSpawner<'w> { ON_ADD, entity, bundle_info.iter_contributed_components(), - #[cfg(feature = "track_location")] caller, ); } @@ -1515,15 +1484,14 @@ impl<'w> BundleSpawner<'w> { archetype, entity, bundle_info.iter_contributed_components(), - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ); if archetype.has_insert_observer() { deferred_world.trigger_observers( ON_INSERT, entity, bundle_info.iter_contributed_components(), - #[cfg(feature = "track_location")] caller, ); } @@ -1538,18 +1506,11 @@ impl<'w> BundleSpawner<'w> { pub unsafe fn spawn( &mut self, bundle: T, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) -> (Entity, T::Effect) { let entity = self.entities().alloc(); // SAFETY: entity is allocated (but non-existent), `T` matches this BundleInfo's type - let (_, after_effect) = unsafe { - self.spawn_non_existent( - entity, - bundle, - #[cfg(feature = "track_location")] - caller, - ) - }; + let (_, after_effect) = unsafe { self.spawn_non_existent(entity, bundle, caller) }; (entity, after_effect) } @@ -1620,11 +1581,11 @@ impl Bundles { /// Also registers all the components in the bundle. pub(crate) fn register_info( &mut self, - components: &mut Components, + components: &mut ComponentsRegistrator, storages: &mut Storages, ) -> BundleId { let bundle_infos = &mut self.bundle_infos; - let id = *self.bundle_ids.entry(TypeId::of::()).or_insert_with(|| { + *self.bundle_ids.entry(TypeId::of::()).or_insert_with(|| { let mut component_ids= Vec::new(); T::component_ids(components, &mut |id| component_ids.push(id)); let id = BundleId(bundle_infos.len()); @@ -1636,8 +1597,7 @@ impl Bundles { unsafe { BundleInfo::new(core::any::type_name::(), storages, components, component_ids, id) }; bundle_infos.push(bundle_info); id - }); - id + }) } /// Registers a new [`BundleInfo`], which contains both explicit and required components for a statically known type. 
@@ -1645,7 +1605,7 @@ impl Bundles { /// Also registers all the components in the bundle. pub(crate) fn register_contributed_bundle_info( &mut self, - components: &mut Components, + components: &mut ComponentsRegistrator, storages: &mut Storages, ) -> BundleId { if let Some(id) = self.contributed_bundle_ids.get(&TypeId::of::()).cloned() { diff --git a/crates/bevy_ecs/src/change_detection.rs b/crates/bevy_ecs/src/change_detection.rs index 7af159e5bd..767134fdc6 100644 --- a/crates/bevy_ecs/src/change_detection.rs +++ b/crates/bevy_ecs/src/change_detection.rs @@ -7,14 +7,13 @@ use crate::{ }; use alloc::borrow::ToOwned; use bevy_ptr::{Ptr, UnsafeCellDeref}; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::Reflect; use core::{ + marker::PhantomData, mem, ops::{Deref, DerefMut}, -}; -#[cfg(feature = "track_location")] -use { - bevy_ptr::ThinSlicePtr, - core::{cell::UnsafeCell, panic::Location}, + panic::Location, }; /// The (arbitrarily chosen) minimum number of world tick increments between `check_tick` scans. @@ -72,9 +71,11 @@ pub trait DetectChanges { /// [`SystemParam`](crate::system::SystemParam). fn last_changed(&self) -> Tick; + /// Returns the change tick recording the time this data was added. + fn added(&self) -> Tick; + /// The location that last caused this to change. - #[cfg(feature = "track_location")] - fn changed_by(&self) -> &'static Location<'static>; + fn changed_by(&self) -> MaybeLocation; } /// Types that implement reliable change detection. @@ -120,6 +121,15 @@ pub trait DetectChangesMut: DetectChanges { /// **Note**: This operation cannot be undone. fn set_changed(&mut self); + /// Flags this value as having been added. + /// + /// It is not normally necessary to call this method. + /// The 'added' tick is set when the value is first added, + /// and is not normally changed afterwards. + /// + /// **Note**: This operation cannot be undone. + fn set_added(&mut self); + /// Manually sets the change tick recording the time when this data was last mutated. /// /// # Warning @@ -128,6 +138,12 @@ pub trait DetectChangesMut: DetectChanges { /// If you want to avoid triggering change detection, use [`bypass_change_detection`](DetectChangesMut::bypass_change_detection) instead. fn set_last_changed(&mut self, last_changed: Tick); + /// Manually sets the added tick recording the time when this data was last added. + /// + /// # Warning + /// The caveats of [`set_last_changed`](DetectChangesMut::set_last_changed) apply. This modifies both the added and changed ticks together. + fn set_last_added(&mut self, last_added: Tick); + /// Manually bypasses change detection, allowing you to mutate the underlying value without updating the change tick. /// /// # Warning @@ -225,7 +241,7 @@ pub trait DetectChangesMut: DetectChanges { /// let new_score = 0; /// if let Some(Score(previous_score)) = score.replace_if_neq(Score(new_score)) { /// // If `score` change, emit a `ScoreChanged` event. - /// score_changed.send(ScoreChanged { + /// score_changed.write(ScoreChanged { /// current: new_score, /// previous: previous_score, /// }); @@ -343,9 +359,13 @@ macro_rules! change_detection_impl { } #[inline] - #[cfg(feature = "track_location")] - fn changed_by(&self) -> &'static Location<'static> { - self.changed_by + fn added(&self) -> Tick { + *self.ticks.added + } + + #[inline] + fn changed_by(&self) -> MaybeLocation { + self.changed_by.copied() } } @@ -376,20 +396,30 @@ macro_rules! 
change_detection_mut_impl { #[track_caller] fn set_changed(&mut self) { *self.ticks.changed = self.ticks.this_run; - #[cfg(feature = "track_location")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_added(&mut self) { + *self.ticks.changed = self.ticks.this_run; + *self.ticks.added = self.ticks.this_run; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] #[track_caller] fn set_last_changed(&mut self, last_changed: Tick) { *self.ticks.changed = last_changed; - #[cfg(feature = "track_location")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_last_added(&mut self, last_added: Tick) { + *self.ticks.added = last_added; + *self.ticks.changed = last_added; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] @@ -403,10 +433,7 @@ macro_rules! change_detection_mut_impl { #[track_caller] fn deref_mut(&mut self) -> &mut Self::Target { self.set_changed(); - #[cfg(feature = "track_location")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); self.value } } @@ -444,8 +471,7 @@ macro_rules! impl_methods { last_run: self.ticks.last_run, this_run: self.ticks.this_run, }, - #[cfg(feature = "track_location")] - changed_by: self.changed_by, + changed_by: self.changed_by.as_deref_mut(), } } @@ -475,7 +501,6 @@ macro_rules! impl_methods { Mut { value: f(self.value), ticks: self.ticks, - #[cfg(feature = "track_location")] changed_by: self.changed_by, } } @@ -489,7 +514,6 @@ macro_rules! impl_methods { value.map(|value| Mut { value, ticks: self.ticks, - #[cfg(feature = "track_location")] changed_by: self.changed_by, }) } @@ -503,7 +527,6 @@ macro_rules! 
impl_methods { value.map(|value| Mut { value, ticks: self.ticks, - #[cfg(feature = "track_location")] changed_by: self.changed_by, }) } @@ -614,8 +637,7 @@ impl<'w> From> for Ticks<'w> { pub struct Res<'w, T: ?Sized + Resource> { pub(crate) value: &'w T, pub(crate) ticks: Ticks<'w>, - #[cfg(feature = "track_location")] - pub(crate) changed_by: &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w &'static Location<'static>>, } impl<'w, T: Resource> Res<'w, T> { @@ -631,7 +653,6 @@ impl<'w, T: Resource> Res<'w, T> { Self { value: this.value, ticks: this.ticks.clone(), - #[cfg(feature = "track_location")] changed_by: this.changed_by, } } @@ -649,8 +670,7 @@ impl<'w, T: Resource> From> for Res<'w, T> { Self { value: res.value, ticks: res.ticks.into(), - #[cfg(feature = "track_location")] - changed_by: res.changed_by, + changed_by: res.changed_by.map(|changed_by| &*changed_by), } } } @@ -662,7 +682,6 @@ impl<'w, T: Resource> From> for Ref<'w, T> { Self { value: res.value, ticks: res.ticks, - #[cfg(feature = "track_location")] changed_by: res.changed_by, } } @@ -695,8 +714,7 @@ impl_debug!(Res<'w, T>, Resource); pub struct ResMut<'w, T: ?Sized + Resource> { pub(crate) value: &'w mut T, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_location")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } impl<'w, 'a, T: Resource> IntoIterator for &'a ResMut<'w, T> @@ -736,7 +754,6 @@ impl<'w, T: Resource> From> for Mut<'w, T> { Mut { value: other.value, ticks: other.ticks, - #[cfg(feature = "track_location")] changed_by: other.changed_by, } } @@ -756,8 +773,7 @@ impl<'w, T: Resource> From> for Mut<'w, T> { pub struct NonSendMut<'w, T: ?Sized + 'static> { pub(crate) value: &'w mut T, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_location")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } change_detection_impl!(NonSendMut<'w, T>, T,); @@ -772,7 +788,6 @@ impl<'w, T: 'static> From> for Mut<'w, T> { Mut { value: other.value, ticks: other.ticks, - #[cfg(feature = "track_location")] changed_by: other.changed_by, } } @@ -805,8 +820,7 @@ impl<'w, T: 'static> From> for Mut<'w, T> { pub struct Ref<'w, T: ?Sized> { pub(crate) value: &'w T, pub(crate) ticks: Ticks<'w>, - #[cfg(feature = "track_location")] - pub(crate) changed_by: &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w &'static Location<'static>>, } impl<'w, T: ?Sized> Ref<'w, T> { @@ -823,7 +837,6 @@ impl<'w, T: ?Sized> Ref<'w, T> { Ref { value: f(self.value), ticks: self.ticks, - #[cfg(feature = "track_location")] changed_by: self.changed_by, } } @@ -837,7 +850,7 @@ impl<'w, T: ?Sized> Ref<'w, T> { /// - `added` - A [`Tick`] that stores the tick when the wrapped value was created. /// - `changed` - A [`Tick`] that stores the last time the wrapped value was changed. /// - `last_run` - A [`Tick`], occurring before `this_run`, which is used - /// as a reference to determine whether the wrapped value is newly added or changed. + /// as a reference to determine whether the wrapped value is newly added or changed. /// - `this_run` - A [`Tick`] corresponding to the current point in time -- "now". 
pub fn new( value: &'w T, @@ -845,7 +858,7 @@ impl<'w, T: ?Sized> Ref<'w, T> { changed: &'w Tick, last_run: Tick, this_run: Tick, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation<&'w &'static Location<'static>>, ) -> Ref<'w, T> { Ref { value, @@ -855,10 +868,18 @@ impl<'w, T: ?Sized> Ref<'w, T> { last_run, this_run, }, - #[cfg(feature = "track_location")] changed_by: caller, } } + + /// Overwrite the `last_run` and `this_run` tick that are used for change detection. + /// + /// This is an advanced feature. `Ref`s are usually _created_ by engine-internal code and + /// _consumed_ by end-user code. + pub fn set_ticks(&mut self, last_run: Tick, this_run: Tick) { + self.ticks.last_run = last_run; + self.ticks.this_run = this_run; + } } impl<'w, 'a, T> IntoIterator for &'a Ref<'w, T> @@ -938,8 +959,7 @@ impl_debug!(Ref<'w, T>,); pub struct Mut<'w, T: ?Sized> { pub(crate) value: &'w mut T, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_location")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } impl<'w, T: ?Sized> Mut<'w, T> { @@ -964,7 +984,7 @@ impl<'w, T: ?Sized> Mut<'w, T> { last_changed: &'w mut Tick, last_run: Tick, this_run: Tick, - #[cfg(feature = "track_location")] caller: &'w mut &'static Location<'static>, + caller: MaybeLocation<&'w mut &'static Location<'static>>, ) -> Self { Self { value, @@ -974,10 +994,18 @@ impl<'w, T: ?Sized> Mut<'w, T> { last_run, this_run, }, - #[cfg(feature = "track_location")] changed_by: caller, } } + + /// Overwrite the `last_run` and `this_run` tick that are used for change detection. + /// + /// This is an advanced feature. `Mut`s are usually _created_ by engine-internal code and + /// _consumed_ by end-user code. + pub fn set_ticks(&mut self, last_run: Tick, this_run: Tick) { + self.ticks.last_run = last_run; + self.ticks.this_run = this_run; + } } impl<'w, T: ?Sized> From> for Ref<'w, T> { @@ -985,8 +1013,7 @@ impl<'w, T: ?Sized> From> for Ref<'w, T> { Self { value: mut_ref.value, ticks: mut_ref.ticks.into(), - #[cfg(feature = "track_location")] - changed_by: mut_ref.changed_by, + changed_by: mut_ref.changed_by.map(|changed_by| &*changed_by), } } } @@ -1032,8 +1059,7 @@ impl_debug!(Mut<'w, T>,); pub struct MutUntyped<'w> { pub(crate) value: PtrMut<'w>, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_location")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } impl<'w> MutUntyped<'w> { @@ -1058,8 +1084,7 @@ impl<'w> MutUntyped<'w> { last_run: self.ticks.last_run, this_run: self.ticks.this_run, }, - #[cfg(feature = "track_location")] - changed_by: self.changed_by, + changed_by: self.changed_by.as_deref_mut(), } } @@ -1110,7 +1135,6 @@ impl<'w> MutUntyped<'w> { Mut { value: f(self.value), ticks: self.ticks, - #[cfg(feature = "track_location")] changed_by: self.changed_by, } } @@ -1125,7 +1149,6 @@ impl<'w> MutUntyped<'w> { value: unsafe { self.value.deref_mut() }, ticks: self.ticks, // SAFETY: `caller` is `Aligned`. 
- #[cfg(feature = "track_location")] changed_by: self.changed_by, } } @@ -1152,9 +1175,13 @@ impl<'w> DetectChanges for MutUntyped<'w> { } #[inline] - #[cfg(feature = "track_location")] - fn changed_by(&self) -> &'static Location<'static> { - self.changed_by + fn changed_by(&self) -> MaybeLocation { + self.changed_by.copied() + } + + #[inline] + fn added(&self) -> Tick { + *self.ticks.added } } @@ -1165,20 +1192,30 @@ impl<'w> DetectChangesMut for MutUntyped<'w> { #[track_caller] fn set_changed(&mut self) { *self.ticks.changed = self.ticks.this_run; - #[cfg(feature = "track_location")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_added(&mut self) { + *self.ticks.changed = self.ticks.this_run; + *self.ticks.added = self.ticks.this_run; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] #[track_caller] fn set_last_changed(&mut self, last_changed: Tick) { *self.ticks.changed = last_changed; - #[cfg(feature = "track_location")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_last_added(&mut self, last_added: Tick) { + *self.ticks.added = last_added; + *self.ticks.changed = last_added; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] @@ -1201,62 +1238,294 @@ impl<'w, T> From> for MutUntyped<'w> { MutUntyped { value: value.value.into(), ticks: value.ticks, - #[cfg(feature = "track_location")] changed_by: value.changed_by, } } } -/// A type alias to [`&'static Location<'static>`](std::panic::Location) when the `track_location` feature is -/// enabled, and the unit type `()` when it is not. +/// A value that contains a `T` if the `track_location` feature is enabled, +/// and is a ZST if it is not. /// -/// This is primarily used in places where `#[cfg(...)]` attributes are not allowed, such as -/// function return types. Because unit is a zero-sized type, it is the equivalent of not using a -/// `Location` at all. +/// The overall API is similar to [`Option`], but whether the value is `Some` or `None` is set at compile +/// time and is the same for all values. /// -/// Please use this type sparingly: prefer normal `#[cfg(...)]` attributes when possible. -#[cfg(feature = "track_location")] -pub(crate) type MaybeLocation = &'static Location<'static>; +/// If the `track_location` feature is disabled, then all functions on this type that return +/// an `MaybeLocation` will have an empty body and should be removed by the optimizer. +/// +/// This allows code to be written that will be checked by the compiler even when the feature is disabled, +/// but that will be entirely removed during compilation. +#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct MaybeLocation> { + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] + marker: PhantomData, + #[cfg(feature = "track_location")] + value: T, +} -/// A type alias to [`&'static Location<'static>`](std::panic::Location) when the `track_location` feature is -/// enabled, and the unit type `()` when it is not. -/// -/// This is primarily used in places where `#[cfg(...)]` attributes are not allowed, such as -/// function return types. Because unit is a zero-sized type, it is the equivalent of not using a -/// `Location` at all. -/// -/// Please use this type sparingly: prefer normal `#[cfg(...)]` attributes when possible. 
-#[cfg(not(feature = "track_location"))]
-pub(crate) type MaybeLocation = ();
+impl<T: core::fmt::Display> core::fmt::Display for MaybeLocation<T> {
+    fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        #[cfg(feature = "track_location")]
+        {
+            self.value.fmt(_f)?;
+        }
+        Ok(())
+    }
+}
-/// A type alias to `&UnsafeCell<&'static Location<'static>>` when the `track_location`
-/// feature is enabled, and the unit type `()` when it is not.
-///
-/// See [`MaybeLocation`] for further information.
-#[cfg(feature = "track_location")]
-pub(crate) type MaybeUnsafeCellLocation<'a> = &'a UnsafeCell<&'static Location<'static>>;
+impl<T> MaybeLocation<T> {
+    /// Constructs a new `MaybeLocation` that wraps the given value.
+    ///
+    /// This may only accept `Copy` types,
+    /// since it needs to drop the value if the `track_location` feature is disabled,
+    /// and non-`Copy` types cannot be dropped in `const` context.
+    /// Use [`new_with`][Self::new_with] if you need to construct a non-`Copy` value.
+    ///
+    /// # See also
+    /// - [`new_with`][Self::new_with] to initialize using a closure.
+    /// - [`new_with_flattened`][Self::new_with_flattened] to initialize using a closure that returns an `Option<MaybeLocation<T>>`.
+    #[inline]
+    pub const fn new(_value: T) -> Self
+    where
+        T: Copy,
+    {
+        Self {
+            #[cfg(feature = "track_location")]
+            value: _value,
+            marker: PhantomData,
+        }
+    }
-/// A type alias to `&UnsafeCell<&'static Location<'static>>` when the `track_location`
-/// feature is enabled, and the unit type `()` when it is not.
-///
-/// See [`MaybeLocation`] for further information.
-#[cfg(not(feature = "track_location"))]
-pub(crate) type MaybeUnsafeCellLocation<'a> = ();
+    /// Constructs a new `MaybeLocation` that wraps the result of the given closure.
+    ///
+    /// # See also
+    /// - [`new`][Self::new] to initialize using a value.
+    /// - [`new_with_flattened`][Self::new_with_flattened] to initialize using a closure that returns an `Option<MaybeLocation<T>>`.
+    #[inline]
+    pub fn new_with(_f: impl FnOnce() -> T) -> Self {
+        Self {
+            #[cfg(feature = "track_location")]
+            value: _f(),
+            marker: PhantomData,
+        }
+    }
-/// A type alias to `ThinSlicePtr<'w, UnsafeCell<&'static Location<'static>>>` when the
-/// `track_location` feature is enabled, and the unit type `()` when it is not.
-///
-/// See [`MaybeLocation`] for further information.
-#[cfg(feature = "track_location")]
-pub(crate) type MaybeThinSlicePtrLocation<'w> =
-    ThinSlicePtr<'w, UnsafeCell<&'static Location<'static>>>;
+    /// Maps a `MaybeLocation<T>` to a `MaybeLocation<U>` by applying a function to a contained value.
+    #[inline]
+    pub fn map<U>(self, _f: impl FnOnce(T) -> U) -> MaybeLocation<U> {
+        MaybeLocation {
+            #[cfg(feature = "track_location")]
+            value: _f(self.value),
+            marker: PhantomData,
+        }
+    }
-/// A type alias to `ThinSlicePtr<'w, UnsafeCell<&'static Location<'static>>>` when the
-/// `track_location` feature is enabled, and the unit type `()` when it is not.
-///
-/// See [`MaybeLocation`] for further information.
-#[cfg(not(feature = "track_location"))]
-pub(crate) type MaybeThinSlicePtrLocation<'w> = ();
+    /// Converts a pair of `MaybeLocation` values to a `MaybeLocation` of a tuple.
+    #[inline]
+    pub fn zip<U>(self, _other: MaybeLocation<U>) -> MaybeLocation<(T, U)> {
+        MaybeLocation {
+            #[cfg(feature = "track_location")]
+            value: (self.value, _other.value),
+            marker: PhantomData,
+        }
+    }
+
+    /// Returns the contained value or a default.
+    /// If the `track_location` feature is enabled, this always returns the contained value.
+    /// If it is disabled, this always returns `T::default()`.
+    #[inline]
+    pub fn unwrap_or_default(self) -> T
+    where
+        T: Default,
+    {
+        self.into_option().unwrap_or_default()
+    }
+
+    /// Converts a `MaybeLocation<T>` to an [`Option<T>`] to allow run-time branching.
+    /// If the `track_location` feature is enabled, this always returns `Some`.
+    /// If it is disabled, this always returns `None`.
+    #[inline]
+    pub fn into_option(self) -> Option<T> {
+        #[cfg(feature = "track_location")]
+        {
+            Some(self.value)
+        }
+        #[cfg(not(feature = "track_location"))]
+        {
+            None
+        }
+    }
+}
+
+impl<T> MaybeLocation<Option<T>> {
+    /// Constructs a new `MaybeLocation` that wraps the result of the given closure.
+    /// If the closure returns `Some`, it unwraps the inner value.
+    ///
+    /// # See also
+    /// - [`new`][Self::new] to initialize using a value.
+    /// - [`new_with`][Self::new_with] to initialize using a closure.
+    #[inline]
+    pub fn new_with_flattened(_f: impl FnOnce() -> Option<MaybeLocation<T>>) -> Self {
+        Self {
+            #[cfg(feature = "track_location")]
+            value: _f().map(|value| value.value),
+            marker: PhantomData,
+        }
+    }
+
+    /// Transposes a `MaybeLocation` of an [`Option`] into an [`Option`] of a `MaybeLocation`.
+    ///
+    /// This can be useful if you want to use the `?` operator to exit early
+    /// if the `track_location` feature is enabled but the value is not found.
+    ///
+    /// If the `track_location` feature is enabled,
+    /// this returns `Some` if the inner value is `Some`
+    /// and `None` if the inner value is `None`.
+    ///
+    /// If it is disabled, this always returns `Some`.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use bevy_ecs::{change_detection::MaybeLocation, world::World};
+    /// # use core::panic::Location;
+    /// #
+    /// # fn test() -> Option<()> {
+    /// let mut world = World::new();
+    /// let entity = world.spawn(()).id();
+    /// let location: MaybeLocation<Option<&'static Location<'static>>> =
+    ///     world.entities().entity_get_spawned_or_despawned_by(entity);
+    /// let location: MaybeLocation<&'static Location<'static>> = location.transpose()?;
+    /// # Some(())
+    /// # }
+    /// # test();
+    /// ```
+    ///
+    /// # See also
+    ///
+    /// - [`into_option`][Self::into_option] to convert to an `Option<Option<T>>`.
+    ///   When used with [`Option::flatten`], this will have a similar effect,
+    ///   but will return `None` when the `track_location` feature is disabled.
+    #[inline]
+    pub fn transpose(self) -> Option<MaybeLocation<T>> {
+        #[cfg(feature = "track_location")]
+        {
+            self.value.map(|value| MaybeLocation {
+                value,
+                marker: PhantomData,
+            })
+        }
+        #[cfg(not(feature = "track_location"))]
+        {
+            Some(MaybeLocation {
+                marker: PhantomData,
+            })
+        }
+    }
+}
+
+impl<T> MaybeLocation<&T> {
+    /// Maps a `MaybeLocation<&T>` to a `MaybeLocation<T>` by copying the contents.
+    #[inline]
+    pub const fn copied(&self) -> MaybeLocation<T>
+    where
+        T: Copy,
+    {
+        MaybeLocation {
+            #[cfg(feature = "track_location")]
+            value: *self.value,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<T> MaybeLocation<&mut T> {
+    /// Maps a `MaybeLocation<&mut T>` to a `MaybeLocation<T>` by copying the contents.
+    #[inline]
+    pub const fn copied(&self) -> MaybeLocation<T>
+    where
+        T: Copy,
+    {
+        MaybeLocation {
+            #[cfg(feature = "track_location")]
+            value: *self.value,
+            marker: PhantomData,
+        }
+    }
+
+    /// Assigns the contents of a `MaybeLocation<T>` to a `MaybeLocation<&mut T>`.
+    #[inline]
+    pub fn assign(&mut self, _value: MaybeLocation<T>) {
+        #[cfg(feature = "track_location")]
+        {
+            *self.value = _value.value;
+        }
+    }
+}
+
+impl<T> MaybeLocation<T> {
+    /// Converts from `&MaybeLocation<T>` to `MaybeLocation<&T>`.
+ #[inline] + pub const fn as_ref(&self) -> MaybeLocation<&T> { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &self.value, + marker: PhantomData, + } + } + + /// Converts from `&mut MaybeLocation` to `MaybeLocation<&mut T>`. + #[inline] + pub const fn as_mut(&mut self) -> MaybeLocation<&mut T> { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &mut self.value, + marker: PhantomData, + } + } + + /// Converts from `&MaybeLocation` to `MaybeLocation<&T::Target>`. + #[inline] + pub fn as_deref(&self) -> MaybeLocation<&T::Target> + where + T: Deref, + { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &*self.value, + marker: PhantomData, + } + } + + /// Converts from `&mut MaybeLocation` to `MaybeLocation<&mut T::Target>`. + #[inline] + pub fn as_deref_mut(&mut self) -> MaybeLocation<&mut T::Target> + where + T: DerefMut, + { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &mut *self.value, + marker: PhantomData, + } + } +} + +impl MaybeLocation { + /// Returns the source location of the caller of this function. If that function's caller is + /// annotated then its call location will be returned, and so on up the stack to the first call + /// within a non-tracked function body. + #[inline] + #[track_caller] + pub fn caller() -> Self { + // Note that this cannot use `new_with`, since `FnOnce` invocations cannot be annotated with `#[track_caller]`. + MaybeLocation { + #[cfg(feature = "track_location")] + value: Location::caller(), + marker: PhantomData, + } + } +} #[cfg(test)] mod tests { @@ -1264,12 +1533,11 @@ mod tests { use bevy_ptr::PtrMut; use bevy_reflect::{FromType, ReflectFromPtr}; use core::ops::{Deref, DerefMut}; - #[cfg(feature = "track_location")] - use core::panic::Location; use crate::{ change_detection::{ - Mut, NonSendMut, Ref, ResMut, TicksMut, CHECK_TICK_THRESHOLD, MAX_CHANGE_AGE, + MaybeLocation, Mut, NonSendMut, Ref, ResMut, TicksMut, CHECK_TICK_THRESHOLD, + MAX_CHANGE_AGE, }, component::{Component, ComponentTicks, Tick}, system::{IntoSystem, Single, System}, @@ -1349,7 +1617,7 @@ mod tests { // Since the world is always ahead, as long as changes can't get older than `u32::MAX` (which we ensure), // the wrapping difference will always be positive, so wraparound doesn't matter. 
let mut query = world.query::>(); - assert!(query.single(&world).is_changed()); + assert!(query.single(&world).unwrap().is_changed()); } #[test] @@ -1395,14 +1663,12 @@ mod tests { this_run: Tick::new(4), }; let mut res = R {}; - #[cfg(feature = "track_location")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let res_mut = ResMut { value: &mut res, ticks, - #[cfg(feature = "track_location")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let into_mut: Mut = res_mut.into(); @@ -1419,8 +1685,7 @@ mod tests { changed: Tick::new(3), }; let mut res = R {}; - #[cfg(feature = "track_location")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let val = Mut::new( &mut res, @@ -1428,8 +1693,7 @@ mod tests { &mut component_ticks.changed, Tick::new(2), // last_run Tick::new(4), // this_run - #[cfg(feature = "track_location")] - &mut caller, + caller.as_mut(), ); assert!(!val.is_added()); @@ -1449,14 +1713,12 @@ mod tests { this_run: Tick::new(4), }; let mut res = R {}; - #[cfg(feature = "track_location")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let non_send_mut = NonSendMut { value: &mut res, ticks, - #[cfg(feature = "track_location")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let into_mut: Mut = non_send_mut.into(); @@ -1485,14 +1747,12 @@ mod tests { }; let mut outer = Outer(0); - #[cfg(feature = "track_location")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let ptr = Mut { value: &mut outer, ticks, - #[cfg(feature = "track_location")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; assert!(!ptr.is_changed()); @@ -1575,14 +1835,12 @@ mod tests { }; let mut value: i32 = 5; - #[cfg(feature = "track_location")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let value = MutUntyped { value: PtrMut::from(&mut value), ticks, - #[cfg(feature = "track_location")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let reflect_from_ptr = >::from_type(); @@ -1613,14 +1871,12 @@ mod tests { this_run: Tick::new(4), }; let mut c = C {}; - #[cfg(feature = "track_location")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let mut_typed = Mut { value: &mut c, ticks, - #[cfg(feature = "track_location")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let into_mut: MutUntyped = mut_typed.into(); diff --git a/crates/bevy_ecs/src/component.rs b/crates/bevy_ecs/src/component.rs index f8e1ee5a69..bfa55804b6 100644 --- a/crates/bevy_ecs/src/component.rs +++ b/crates/bevy_ecs/src/component.rs @@ -3,20 +3,23 @@ use crate::{ archetype::ArchetypeFlags, bundle::BundleInfo, - change_detection::MAX_CHANGE_AGE, - entity::{ComponentCloneCtx, Entity}, + change_detection::{MaybeLocation, MAX_CHANGE_AGE}, + entity::{ComponentCloneCtx, Entity, EntityMapper, SourceComponent}, query::DebugCheckedUnwrap, + relationship::RelationshipHookMode, resource::Resource, storage::{SparseSetIndex, SparseSets, Table, TableRow}, - system::{Commands, Local, SystemParam}, + system::{Local, SystemParam}, world::{DeferredWorld, FromWorld, World}, }; -#[cfg(feature = "bevy_reflect")] use alloc::boxed::Box; use alloc::{borrow::Cow, format, vec::Vec}; pub use bevy_ecs_macros::Component; -use bevy_platform_support::collections::{HashMap, HashSet}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; +use bevy_platform::{ + collections::{HashMap, HashSet}, 
+ sync::PoisonError, +}; use bevy_ptr::{OwningPtr, UnsafeCellDeref}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; @@ -28,13 +31,12 @@ use core::{ fmt::Debug, marker::PhantomData, mem::needs_drop, - panic::Location, + ops::{Deref, DerefMut}, }; use disqualified::ShortName; +use smallvec::SmallVec; use thiserror::Error; -pub use bevy_ecs_macros::require; - /// A data type that can be used to store data for an [entity]. /// /// `Component` is a [derivable trait]: this means that a data type can implement it by applying a `#[derive(Component)]` attribute to it. @@ -158,16 +160,73 @@ pub use bevy_ecs_macros::require; /// assert_eq!(&C(0), world.entity(id).get::().unwrap()); /// ``` /// -/// You can also define a custom constructor function or closure: +/// You can define inline component values that take the following forms: +/// ``` +/// # use bevy_ecs::prelude::*; +/// #[derive(Component)] +/// #[require( +/// B(1), // tuple structs +/// C { // named-field structs +/// x: 1, +/// ..Default::default() +/// }, +/// D::One, // enum variants +/// E::ONE, // associated consts +/// F::new(1) // constructors +/// )] +/// struct A; +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// struct B(u8); +/// +/// #[derive(Component, PartialEq, Eq, Debug, Default)] +/// struct C { +/// x: u8, +/// y: u8, +/// } +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// enum D { +/// Zero, +/// One, +/// } +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// struct E(u8); +/// +/// impl E { +/// pub const ONE: Self = Self(1); +/// } +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// struct F(u8); +/// +/// impl F { +/// fn new(value: u8) -> Self { +/// Self(value) +/// } +/// } +/// +/// # let mut world = World::default(); +/// let id = world.spawn(A).id(); +/// assert_eq!(&B(1), world.entity(id).get::().unwrap()); +/// assert_eq!(&C { x: 1, y: 0 }, world.entity(id).get::().unwrap()); +/// assert_eq!(&D::One, world.entity(id).get::().unwrap()); +/// assert_eq!(&E(1), world.entity(id).get::().unwrap()); +/// assert_eq!(&F(1), world.entity(id).get::().unwrap()); +/// ```` +/// +/// +/// You can also define arbitrary expressions by using `=` /// /// ``` /// # use bevy_ecs::prelude::*; /// #[derive(Component)] -/// #[require(C(init_c))] +/// #[require(C = init_c())] /// struct A; /// /// #[derive(Component, PartialEq, Eq, Debug)] -/// #[require(C(|| C(20)))] +/// #[require(C = C(20))] /// struct B; /// /// #[derive(Component, PartialEq, Eq, Debug)] @@ -178,6 +237,10 @@ pub use bevy_ecs_macros::require; /// } /// /// # let mut world = World::default(); +/// // This will implicitly also insert C with the init_c() constructor +/// let id = world.spawn(A).id(); +/// assert_eq!(&C(10), world.entity(id).get::().unwrap()); +/// /// // This will implicitly also insert C with the `|| C(20)` constructor closure /// let id = world.spawn(B).id(); /// assert_eq!(&C(20), world.entity(id).get::().unwrap()); @@ -218,13 +281,13 @@ pub use bevy_ecs_macros::require; /// struct X(usize); /// /// #[derive(Component, Default)] -/// #[require(X(|| X(1)))] +/// #[require(X(1))] /// struct Y; /// /// #[derive(Component)] /// #[require( /// Y, -/// X(|| X(2)), +/// X(2), /// )] /// struct Z; /// @@ -284,6 +347,23 @@ pub use bevy_ecs_macros::require; /// Note that requirements must currently be registered before the requiring component is inserted /// into the world for the first time. Registering requirements after this will lead to a panic. 
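Editor's note: the `#[require(...)]` forms documented in the hunk above are resolved when the component is derived. For the runtime path that this file's doc comments point to (`World::register_required_components`), here is a minimal sketch; the `R: Component + Default` bound and the exact method name are taken from Bevy's `World` API rather than from this diff, and, per the note above, the call must happen before the first requiring component is inserted.

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct A;

#[derive(Component, Default, PartialEq, Eq, Debug)]
struct B(u8);

fn main() {
    let mut world = World::new();

    // Runtime equivalent of `#[require(B)]`; must run before the first `A`
    // is inserted, otherwise required-component registration panics.
    world.register_required_components::<A, B>();

    let id = world.spawn(A).id();
    // `B` was inserted automatically with its `Default` value.
    assert_eq!(&B(0), world.entity(id).get::<B>().unwrap());
}
```

A sibling `register_required_components_with` variant (constructor-based) is assumed to exist for non-`Default` requirements, with the same ordering caveat.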
/// +/// # Relationships between Entities +/// +/// Sometimes it is useful to define relationships between entities. A common example is the +/// parent / child relationship. Since Components are how data is stored for Entities, one might +/// naturally think to create a Component which has a field of type [`Entity`]. +/// +/// To facilitate this pattern, Bevy provides the [`Relationship`](`crate::relationship::Relationship`) +/// trait. You can derive the [`Relationship`](`crate::relationship::Relationship`) and +/// [`RelationshipTarget`](`crate::relationship::RelationshipTarget`) traits in addition to the +/// Component trait in order to implement data driven relationships between entities, see the trait +/// docs for more details. +/// +/// In addition, Bevy provides canonical implementations of the parent / child relationship via the +/// [`ChildOf`](crate::hierarchy::ChildOf) [`Relationship`](crate::relationship::Relationship) and +/// the [`Children`](crate::hierarchy::Children) +/// [`RelationshipTarget`](crate::relationship::RelationshipTarget). +/// /// # Adding component's hooks /// /// See [`ComponentHooks`] for a detailed explanation of component's hooks. @@ -321,6 +401,25 @@ pub use bevy_ecs_macros::require; /// } /// ``` /// +/// This also supports function calls that yield closures +/// +/// ``` +/// # use bevy_ecs::component::{Component, HookContext}; +/// # use bevy_ecs::world::DeferredWorld; +/// # +/// #[derive(Component)] +/// #[component(on_add = my_msg_hook("hello"))] +/// #[component(on_despawn = my_msg_hook("yoink"))] +/// struct ComponentA; +/// +/// // a hook closure generating function +/// fn my_msg_hook(message: &'static str) -> impl Fn(DeferredWorld, HookContext) { +/// move |_world, _ctx| { +/// println!("{message}"); +/// } +/// } +/// ``` +/// /// # Implementing the trait for foreign types /// /// As a consequence of the [orphan rule], it is not possible to separate into two different crates the implementation of `Component` from the definition of a type. @@ -432,7 +531,7 @@ pub trait Component: Send + Sync + 'static { /// Registers required components. fn register_required_components( _component_id: ComponentId, - _components: &mut Components, + _components: &mut ComponentsRegistrator, _required_components: &mut RequiredComponents, _inheritance_depth: u16, _recursion_check_stack: &mut Vec, @@ -447,14 +546,21 @@ pub trait Component: Send + Sync + 'static { ComponentCloneBehavior::Default } - /// Visits entities stored on the component. + /// Maps the entities on this component using the given [`EntityMapper`]. This is used to remap entities in contexts like scenes and entity cloning. + /// When deriving [`Component`], this is populated by annotating fields containing entities with `#[entities]` + /// + /// ``` + /// # use bevy_ecs::{component::Component, entity::Entity}; + /// #[derive(Component)] + /// struct Inventory { + /// #[entities] + /// items: Vec + /// } + /// ``` + /// + /// Fields with `#[entities]` must implement [`MapEntities`](crate::entity::MapEntities). #[inline] - fn visit_entities(_this: &Self, _f: impl FnMut(Entity)) {} - - /// Returns pointers to every entity stored on the component. This will be used to remap entity references when this entity - /// is cloned. - #[inline] - fn visit_entities_mut(_this: &mut Self, _f: impl FnMut(&mut Entity)) {} + fn map_entities(_this: &mut Self, _mapper: &mut E) {} } mod private { @@ -544,7 +650,9 @@ pub struct HookContext { /// The [`ComponentId`] this hook was invoked for. 
pub component_id: ComponentId, /// The caller location is `Some` if the `track_caller` feature is enabled. - pub caller: Option<&'static Location<'static>>, + pub caller: MaybeLocation, + /// Configures how relationship hooks will run + pub relationship_hook_mode: RelationshipHookMode, } /// [`World`]-mutating functions that run as part of lifecycle events of a [`Component`]. @@ -567,7 +675,7 @@ pub struct HookContext { /// /// ``` /// use bevy_ecs::prelude::*; -/// use bevy_platform_support::collections::HashSet; +/// use bevy_platform::collections::HashSet; /// /// #[derive(Component)] /// struct MyTrackedComponent; @@ -910,7 +1018,7 @@ impl ComponentInfo { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] pub struct ComponentId(usize); @@ -1086,7 +1194,7 @@ impl ComponentDescriptor { } /// Function type that can be used to clone an entity. -pub type ComponentCloneFn = fn(&mut Commands, &mut ComponentCloneCtx); +pub type ComponentCloneFn = fn(&SourceComponent, &mut ComponentCloneCtx); /// The clone behavior to use when cloning a [`Component`]. #[derive(Clone, Debug, Default, PartialEq, Eq)] @@ -1098,11 +1206,6 @@ pub enum ComponentCloneBehavior { Ignore, /// Uses a custom [`ComponentCloneFn`]. Custom(ComponentCloneFn), - /// Uses a [`ComponentCloneFn`] that produces an empty version of the given relationship target. - // TODO: this exists so that the current scene spawning code can know when to skip these components. - // When we move to actually cloning entities in scene spawning code, this should be removed in favor of Custom, as the - // distinction will no longer be necessary. - RelationshipTarget(ComponentCloneFn), } impl ComponentCloneBehavior { @@ -1133,21 +1236,473 @@ impl ComponentCloneBehavior { match self { ComponentCloneBehavior::Default => default, ComponentCloneBehavior::Ignore => component_clone_ignore, - ComponentCloneBehavior::Custom(custom) - | ComponentCloneBehavior::RelationshipTarget(custom) => *custom, + ComponentCloneBehavior::Custom(custom) => *custom, } } } -/// Stores metadata associated with each kind of [`Component`] in a given [`World`]. -#[derive(Debug, Default)] -pub struct Components { - components: Vec, - indices: TypeIdMap, - resource_indices: TypeIdMap, +/// A queued component registration. +struct QueuedRegistration { + registrator: Box, + id: ComponentId, + descriptor: ComponentDescriptor, } -impl Components { +impl QueuedRegistration { + /// Creates the [`QueuedRegistration`]. + /// + /// # Safety + /// + /// [`ComponentId`] must be unique. + unsafe fn new( + id: ComponentId, + descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, + ) -> Self { + Self { + registrator: Box::new(func), + id, + descriptor, + } + } + + /// Performs the registration, returning the now valid [`ComponentId`]. + fn register(self, registrator: &mut ComponentsRegistrator) -> ComponentId { + (self.registrator)(registrator, self.id, self.descriptor); + self.id + } +} + +/// Allows queuing components to be registered. 
+#[derive(Default)] +pub struct QueuedComponents { + components: TypeIdMap, + resources: TypeIdMap, + dynamic_registrations: Vec, +} + +impl Debug for QueuedComponents { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let components = self + .components + .iter() + .map(|(type_id, queued)| (type_id, queued.id)) + .collect::>(); + let resources = self + .resources + .iter() + .map(|(type_id, queued)| (type_id, queued.id)) + .collect::>(); + let dynamic_registrations = self + .dynamic_registrations + .iter() + .map(|queued| queued.id) + .collect::>(); + write!(f, "components: {components:?}, resources: {resources:?}, dynamic_registrations: {dynamic_registrations:?}") + } +} + +/// Generates [`ComponentId`]s. +#[derive(Debug, Default)] +pub struct ComponentIds { + next: bevy_platform::sync::atomic::AtomicUsize, +} + +impl ComponentIds { + /// Peeks the next [`ComponentId`] to be generated without generating it. + pub fn peek(&self) -> ComponentId { + ComponentId( + self.next + .load(bevy_platform::sync::atomic::Ordering::Relaxed), + ) + } + + /// Generates and returns the next [`ComponentId`]. + pub fn next(&self) -> ComponentId { + ComponentId( + self.next + .fetch_add(1, bevy_platform::sync::atomic::Ordering::Relaxed), + ) + } + + /// Peeks the next [`ComponentId`] to be generated without generating it. + pub fn peek_mut(&mut self) -> ComponentId { + ComponentId(*self.next.get_mut()) + } + + /// Generates and returns the next [`ComponentId`]. + pub fn next_mut(&mut self) -> ComponentId { + let id = self.next.get_mut(); + let result = ComponentId(*id); + *id += 1; + result + } + + /// Returns the number of [`ComponentId`]s generated. + pub fn len(&self) -> usize { + self.peek().0 + } + + /// Returns true if and only if no ids have been generated. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +/// A type that enables queuing registration in [`Components`]. +/// +/// # Note +/// +/// These queued registrations return [`ComponentId`]s. +/// These ids are not yet valid, but they will become valid +/// when either [`ComponentsRegistrator::apply_queued_registrations`] is called or the same registration is made directly. +/// In either case, the returned [`ComponentId`]s will be correct, but they are not correct yet. +/// +/// Generally, that means these [`ComponentId`]s can be safely used for read-only purposes. +/// Modifying the contents of the world through these [`ComponentId`]s directly without waiting for them to be fully registered +/// and without then confirming that they have been fully registered is not supported. +/// Hence, extra care is needed with these [`ComponentId`]s to ensure all safety rules are followed. +/// +/// As a rule of thumb, if you have mutable access to [`ComponentsRegistrator`], prefer to use that instead. +/// Use this only if you need to know the id of a component but do not need to modify the contents of the world based on that id. +#[derive(Clone, Copy)] +pub struct ComponentsQueuedRegistrator<'w> { + components: &'w Components, + ids: &'w ComponentIds, +} + +impl Deref for ComponentsQueuedRegistrator<'_> { + type Target = Components; + + fn deref(&self) -> &Self::Target { + self.components + } +} + +impl<'w> ComponentsQueuedRegistrator<'w> { + /// Constructs a new [`ComponentsQueuedRegistrator`]. + /// + /// # Safety + /// + /// The [`Components`] and [`ComponentIds`] must match. + /// For example, they must be from the same world. 
+ pub unsafe fn new(components: &'w Components, ids: &'w ComponentIds) -> Self { + Self { components, ids } + } + + /// Queues this function to run as a component registrator. + /// + /// # Safety + /// + /// The [`TypeId`] must not already be registered or queued as a component. + unsafe fn force_register_arbitrary_component( + &self, + type_id: TypeId, + descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, + ) -> ComponentId { + let id = self.ids.next(); + self.components + .queued + .write() + .unwrap_or_else(PoisonError::into_inner) + .components + .insert( + type_id, + // SAFETY: The id was just generated. + unsafe { QueuedRegistration::new(id, descriptor, func) }, + ); + id + } + + /// Queues this function to run as a resource registrator. + /// + /// # Safety + /// + /// The [`TypeId`] must not already be registered or queued as a resource. + unsafe fn force_register_arbitrary_resource( + &self, + type_id: TypeId, + descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, + ) -> ComponentId { + let id = self.ids.next(); + self.components + .queued + .write() + .unwrap_or_else(PoisonError::into_inner) + .resources + .insert( + type_id, + // SAFETY: The id was just generated. + unsafe { QueuedRegistration::new(id, descriptor, func) }, + ); + id + } + + /// Queues this function to run as a dynamic registrator. + fn force_register_arbitrary_dynamic( + &self, + descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, + ) -> ComponentId { + let id = self.ids.next(); + self.components + .queued + .write() + .unwrap_or_else(PoisonError::into_inner) + .dynamic_registrations + .push( + // SAFETY: The id was just generated. + unsafe { QueuedRegistration::new(id, descriptor, func) }, + ); + id + } + + /// This is a queued version of [`ComponentsRegistrator::register_component`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// If this has already been registered or queued, this returns the previous [`ComponentId`]. + /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. + #[inline] + pub fn queue_register_component(&self) -> ComponentId { + self.component_id::().unwrap_or_else(|| { + // SAFETY: We just checked that this type was not in the queue. + unsafe { + self.force_register_arbitrary_component( + TypeId::of::(), + ComponentDescriptor::new::(), + |registrator, id, _descriptor| { + // SAFETY: We just checked that this is not currently registered or queued, and if it was registered since, this would have been dropped from the queue. + #[expect(unused_unsafe, reason = "More precise to specify.")] + unsafe { + registrator.register_component_unchecked::(&mut Vec::new(), id); + } + }, + ) + } + }) + } + + /// This is a queued version of [`ComponentsRegistrator::register_component_with_descriptor`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. 
+ #[inline] + pub fn queue_register_component_with_descriptor( + &self, + descriptor: ComponentDescriptor, + ) -> ComponentId { + self.force_register_arbitrary_dynamic(descriptor, |registrator, id, descriptor| { + // SAFETY: Id uniqueness handled by caller. + unsafe { + registrator.register_component_inner(id, descriptor); + } + }) + } + + /// This is a queued version of [`ComponentsRegistrator::register_resource`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// If this has already been registered or queued, this returns the previous [`ComponentId`]. + /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. + #[inline] + pub fn queue_register_resource(&self) -> ComponentId { + let type_id = TypeId::of::(); + self.get_resource_id(type_id).unwrap_or_else(|| { + // SAFETY: We just checked that this type was not in the queue. + unsafe { + self.force_register_arbitrary_resource( + type_id, + ComponentDescriptor::new_resource::(), + move |registrator, id, descriptor| { + // SAFETY: We just checked that this is not currently registered or queued, and if it was registered since, this would have been dropped from the queue. + // SAFETY: Id uniqueness handled by caller, and the type_id matches descriptor. + #[expect(unused_unsafe, reason = "More precise to specify.")] + unsafe { + registrator.register_resource_unchecked(type_id, id, descriptor); + } + }, + ) + } + }) + } + + /// This is a queued version of [`ComponentsRegistrator::register_non_send`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// If this has already been registered or queued, this returns the previous [`ComponentId`]. + /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. + #[inline] + pub fn queue_register_non_send(&self) -> ComponentId { + let type_id = TypeId::of::(); + self.get_resource_id(type_id).unwrap_or_else(|| { + // SAFETY: We just checked that this type was not in the queue. + unsafe { + self.force_register_arbitrary_resource( + type_id, + ComponentDescriptor::new_non_send::(StorageType::default()), + move |registrator, id, descriptor| { + // SAFETY: We just checked that this is not currently registered or queued, and if it was registered since, this would have been dropped from the queue. + // SAFETY: Id uniqueness handled by caller, and the type_id matches descriptor. + #[expect(unused_unsafe, reason = "More precise to specify.")] + unsafe { + registrator.register_resource_unchecked(type_id, id, descriptor); + } + }, + ) + } + }) + } + + /// This is a queued version of [`ComponentsRegistrator::register_resource_with_descriptor`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. + #[inline] + pub fn queue_register_resource_with_descriptor( + &self, + descriptor: ComponentDescriptor, + ) -> ComponentId { + self.force_register_arbitrary_dynamic(descriptor, |registrator, id, descriptor| { + // SAFETY: Id uniqueness handled by caller. 
+ unsafe { + registrator.register_component_inner(id, descriptor); + } + }) + } +} + +/// A [`Components`] wrapper that enables additional features, like registration. +pub struct ComponentsRegistrator<'w> { + components: &'w mut Components, + ids: &'w mut ComponentIds, +} + +impl Deref for ComponentsRegistrator<'_> { + type Target = Components; + + fn deref(&self) -> &Self::Target { + self.components + } +} + +impl DerefMut for ComponentsRegistrator<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.components + } +} + +impl<'w> ComponentsRegistrator<'w> { + /// Constructs a new [`ComponentsRegistrator`]. + /// + /// # Safety + /// + /// The [`Components`] and [`ComponentIds`] must match. + /// For example, they must be from the same world. + pub unsafe fn new(components: &'w mut Components, ids: &'w mut ComponentIds) -> Self { + Self { components, ids } + } + + /// Converts this [`ComponentsRegistrator`] into a [`ComponentsQueuedRegistrator`]. + /// This is intended for use to pass this value to a function that requires [`ComponentsQueuedRegistrator`]. + /// It is generally not a good idea to queue a registration when you can instead register directly on this type. + pub fn as_queued(&self) -> ComponentsQueuedRegistrator<'_> { + // SAFETY: ensured by the caller that created self. + unsafe { ComponentsQueuedRegistrator::new(self.components, self.ids) } + } + + /// Applies every queued registration. + /// This ensures that every valid [`ComponentId`] is registered, + /// enabling retrieving [`ComponentInfo`], etc. + pub fn apply_queued_registrations(&mut self) { + if !self.any_queued_mut() { + return; + } + + // Note: + // + // This is not just draining the queue. We need to empty the queue without removing the information from `Components`. + // If we drained directly, we could break invariance. + // + // For example, say `ComponentA` and `ComponentB` are queued, and `ComponentA` requires `ComponentB`. + // If we drain directly, and `ComponentA` was the first to be registered, then, when `ComponentA` + // registers `ComponentB` in `Component::register_required_components`, + // `Components` will not know that `ComponentB` was queued + // (since it will have been drained from the queue.) + // If that happened, `Components` would assign a new `ComponentId` to `ComponentB` + // which would be *different* than the id it was assigned in the queue. + // Then, when the drain iterator gets to `ComponentB`, + // it would be unsafely registering `ComponentB`, which is already registered. + // + // As a result, we need to pop from each queue one by one instead of draining. + + // components + while let Some(registrator) = { + let queued = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + queued.components.keys().next().copied().map(|type_id| { + // SAFETY: the id just came from a valid iterator. + unsafe { queued.components.remove(&type_id).debug_checked_unwrap() } + }) + } { + registrator.register(self); + } + + // resources + while let Some(registrator) = { + let queued = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + queued.resources.keys().next().copied().map(|type_id| { + // SAFETY: the id just came from a valid iterator. 
+ unsafe { queued.resources.remove(&type_id).debug_checked_unwrap() } + }) + } { + registrator.register(self); + } + + // dynamic + let queued = &mut self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + if !queued.dynamic_registrations.is_empty() { + for registrator in core::mem::take(&mut queued.dynamic_registrations) { + registrator.register(self); + } + } + } + /// Registers a [`Component`] of type `T` with this instance. /// If a component of this type has already been registered, this will return /// the ID of the pre-existing component. @@ -1155,57 +1710,90 @@ impl Components { /// # See also /// /// * [`Components::component_id()`] - /// * [`Components::register_component_with_descriptor()`] + /// * [`ComponentsRegistrator::register_component_with_descriptor()`] #[inline] pub fn register_component(&mut self) -> ComponentId { - self.register_component_internal::(&mut Vec::new()) + self.register_component_checked::(&mut Vec::new()) } + /// Same as [`Self::register_component_unchecked`] but keeps a checks for safety. #[inline] - fn register_component_internal( + fn register_component_checked( &mut self, recursion_check_stack: &mut Vec, ) -> ComponentId { - let mut is_new_registration = false; - let id = { - let Components { - indices, - components, - .. - } = self; - let type_id = TypeId::of::(); - *indices.entry(type_id).or_insert_with(|| { - let id = Components::register_component_inner( - components, - ComponentDescriptor::new::(), - ); - is_new_registration = true; - id - }) - }; - if is_new_registration { - let mut required_components = RequiredComponents::default(); - T::register_required_components( - id, - self, - &mut required_components, - 0, - recursion_check_stack, - ); - let info = &mut self.components[id.index()]; + let type_id = TypeId::of::(); + if let Some(id) = self.indices.get(&type_id) { + return *id; + } - #[expect( - deprecated, - reason = "need to use this method until it is removed to ensure user defined components register hooks correctly" - )] - // TODO: Replace with `info.hooks.update_from_component::();` once `Component::register_component_hooks` is removed - T::register_component_hooks(&mut info.hooks); + if let Some(registrator) = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner) + .components + .remove(&type_id) + { + // If we are trying to register something that has already been queued, we respect the queue. + // Just like if we are trying to register something that already is, we respect the first registration. + return registrator.register(self); + } - info.required_components = required_components; + let id = self.ids.next_mut(); + // SAFETY: The component is not currently registered, and the id is fresh. + unsafe { + self.register_component_unchecked::(recursion_check_stack, id); } id } + /// # Safety + /// + /// Neither this component, nor its id may be registered or queued. This must be a new registration. + #[inline] + unsafe fn register_component_unchecked( + &mut self, + recursion_check_stack: &mut Vec, + id: ComponentId, + ) { + // SAFETY: ensured by caller. 
+ unsafe { + self.register_component_inner(id, ComponentDescriptor::new::()); + } + let type_id = TypeId::of::(); + let prev = self.indices.insert(type_id, id); + debug_assert!(prev.is_none()); + + let mut required_components = RequiredComponents::default(); + T::register_required_components( + id, + self, + &mut required_components, + 0, + recursion_check_stack, + ); + // SAFETY: we just inserted it in `register_component_inner` + let info = unsafe { + &mut self + .components + .components + .get_mut(id.0) + .debug_checked_unwrap() + .as_mut() + .debug_checked_unwrap() + }; + + #[expect( + deprecated, + reason = "need to use this method until it is removed to ensure user defined components register hooks correctly" + )] + // TODO: Replace with `info.hooks.update_from_component::();` once `Component::register_component_hooks` is removed + T::register_component_hooks(&mut info.hooks); + + info.required_components = required_components; + } + /// Registers a component described by `descriptor`. /// /// # Note @@ -1216,67 +1804,320 @@ impl Components { /// # See also /// /// * [`Components::component_id()`] - /// * [`Components::register_component()`] + /// * [`ComponentsRegistrator::register_component()`] + #[inline] pub fn register_component_with_descriptor( &mut self, descriptor: ComponentDescriptor, ) -> ComponentId { - Components::register_component_inner(&mut self.components, descriptor) + let id = self.ids.next_mut(); + // SAFETY: The id is fresh. + unsafe { + self.register_component_inner(id, descriptor); + } + id } + // NOTE: This should maybe be private, but it is currently public so that `bevy_ecs_macros` can use it. + // We can't directly move this there either, because this uses `Components::get_required_by_mut`, + // which is private, and could be equally risky to expose to users. + /// Registers the given component `R` and [required components] inherited from it as required by `T`, + /// and adds `T` to their lists of requirees. + /// + /// The given `inheritance_depth` determines how many levels of inheritance deep the requirement is. + /// A direct requirement has a depth of `0`, and each level of inheritance increases the depth by `1`. + /// Lower depths are more specific requirements, and can override existing less specific registrations. + /// + /// The `recursion_check_stack` allows checking whether this component tried to register itself as its + /// own (indirect) required component. + /// + /// This method does *not* register any components as required by components that require `T`. + /// + /// Only use this method if you know what you are doing. In most cases, you should instead use [`World::register_required_components`], + /// or the equivalent method in `bevy_app::App`. + /// + /// [required component]: Component#required-components + #[doc(hidden)] + pub fn register_required_components_manual( + &mut self, + required_components: &mut RequiredComponents, + constructor: fn() -> R, + inheritance_depth: u16, + recursion_check_stack: &mut Vec, + ) { + let requiree = self.register_component_checked::(recursion_check_stack); + let required = self.register_component_checked::(recursion_check_stack); + + // SAFETY: We just created the components. + unsafe { + self.register_required_components_manual_unchecked::( + requiree, + required, + required_components, + constructor, + inheritance_depth, + ); + } + } + + /// Registers a [`Resource`] of type `T` with this instance. 
+ /// If a resource of this type has already been registered, this will return + /// the ID of the pre-existing resource. + /// + /// # See also + /// + /// * [`Components::resource_id()`] + /// * [`ComponentsRegistrator::register_resource_with_descriptor()`] #[inline] - fn register_component_inner( - components: &mut Vec, + pub fn register_resource(&mut self) -> ComponentId { + // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] + unsafe { + self.register_resource_with(TypeId::of::(), || { + ComponentDescriptor::new_resource::() + }) + } + } + + /// Registers a [non-send resource](crate::system::NonSend) of type `T` with this instance. + /// If a resource of this type has already been registered, this will return + /// the ID of the pre-existing resource. + #[inline] + pub fn register_non_send(&mut self) -> ComponentId { + // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] + unsafe { + self.register_resource_with(TypeId::of::(), || { + ComponentDescriptor::new_non_send::(StorageType::default()) + }) + } + } + + /// Same as [`Components::register_resource_unchecked`] but handles safety. + /// + /// # Safety + /// + /// The [`ComponentDescriptor`] must match the [`TypeId`]. + #[inline] + unsafe fn register_resource_with( + &mut self, + type_id: TypeId, + descriptor: impl FnOnce() -> ComponentDescriptor, + ) -> ComponentId { + if let Some(id) = self.resource_indices.get(&type_id) { + return *id; + } + + if let Some(registrator) = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner) + .resources + .remove(&type_id) + { + // If we are trying to register something that has already been queued, we respect the queue. + // Just like if we are trying to register something that already is, we respect the first registration. + return registrator.register(self); + } + + let id = self.ids.next_mut(); + // SAFETY: The resource is not currently registered, the id is fresh, and the [`ComponentDescriptor`] matches the [`TypeId`] + unsafe { + self.register_resource_unchecked(type_id, id, descriptor()); + } + id + } + + /// Registers a [`Resource`] described by `descriptor`. + /// + /// # Note + /// + /// If this method is called multiple times with identical descriptors, a distinct [`ComponentId`] + /// will be created for each one. + /// + /// # See also + /// + /// * [`Components::resource_id()`] + /// * [`ComponentsRegistrator::register_resource()`] + #[inline] + pub fn register_resource_with_descriptor( + &mut self, descriptor: ComponentDescriptor, ) -> ComponentId { - let component_id = ComponentId(components.len()); - let info = ComponentInfo::new(component_id, descriptor); - components.push(info); - component_id + let id = self.ids.next_mut(); + // SAFETY: The id is fresh. + unsafe { + self.register_component_inner(id, descriptor); + } + id + } +} + +/// Stores metadata associated with each kind of [`Component`] in a given [`World`]. +#[derive(Debug, Default)] +pub struct Components { + components: Vec>, + indices: TypeIdMap, + resource_indices: TypeIdMap, + // This is kept internal and local to verify that no deadlocks can occor. + queued: bevy_platform::sync::RwLock, +} + +impl Components { + /// This registers any descriptor, component or resource. + /// + /// # Safety + /// + /// The id must have never been registered before. This must be a fresh registration. 
+ #[inline] + unsafe fn register_component_inner( + &mut self, + id: ComponentId, + descriptor: ComponentDescriptor, + ) { + let info = ComponentInfo::new(id, descriptor); + let least_len = id.0 + 1; + if self.components.len() < least_len { + self.components.resize_with(least_len, || None); + } + // SAFETY: We just extended the vec to make this index valid. + let slot = unsafe { self.components.get_mut(id.0).debug_checked_unwrap() }; + // Caller ensures id is unique + debug_assert!(slot.is_none()); + *slot = Some(info); + } + + /// Returns the number of components registered or queued with this instance. + #[inline] + pub fn len(&self) -> usize { + self.num_queued() + self.num_registered() + } + + /// Returns `true` if there are no components registered or queued with this instance. Otherwise, this returns `false`. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 } /// Returns the number of components registered with this instance. #[inline] - pub fn len(&self) -> usize { + pub fn num_queued(&self) -> usize { + let queued = self.queued.read().unwrap_or_else(PoisonError::into_inner); + queued.components.len() + queued.dynamic_registrations.len() + queued.resources.len() + } + + /// Returns `true` if there are any components registered with this instance. Otherwise, this returns `false`. + #[inline] + pub fn any_queued(&self) -> bool { + self.num_queued() > 0 + } + + /// A faster version of [`Self::num_queued`]. + #[inline] + pub fn num_queued_mut(&mut self) -> usize { + let queued = self + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + queued.components.len() + queued.dynamic_registrations.len() + queued.resources.len() + } + + /// A faster version of [`Self::any_queued`]. + #[inline] + pub fn any_queued_mut(&mut self) -> bool { + self.num_queued_mut() > 0 + } + + /// Returns the number of components registered with this instance. + #[inline] + pub fn num_registered(&self) -> usize { self.components.len() } - /// Returns `true` if there are no components registered with this instance. Otherwise, this returns `false`. + /// Returns `true` if there are any components registered with this instance. Otherwise, this returns `false`. #[inline] - pub fn is_empty(&self) -> bool { - self.components.len() == 0 + pub fn any_registered(&self) -> bool { + self.num_registered() > 0 } - /// Gets the metadata associated with the given component. + /// Gets the metadata associated with the given component, if it is registered. + /// This will return `None` if the id is not regiserted or is queued. /// /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. #[inline] pub fn get_info(&self, id: ComponentId) -> Option<&ComponentInfo> { - self.components.get(id.0) + self.components.get(id.0).and_then(|info| info.as_ref()) } - /// Returns the name associated with the given component. + /// Gets the [`ComponentDescriptor`] of the component with this [`ComponentId`] if it is present. + /// This will return `None` only if the id is neither regisered nor queued to be registered. + /// + /// Currently, the [`Cow`] will be [`Cow::Owned`] if and only if the component is queued. It will be [`Cow::Borrowed`] otherwise. /// /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. 
#[inline] - pub fn get_name(&self, id: ComponentId) -> Option<&str> { - self.get_info(id).map(ComponentInfo::name) + pub fn get_descriptor<'a>(&'a self, id: ComponentId) -> Option> { + self.components + .get(id.0) + .and_then(|info| info.as_ref().map(|info| Cow::Borrowed(&info.descriptor))) + .or_else(|| { + let queued = self.queued.read().unwrap_or_else(PoisonError::into_inner); + // first check components, then resources, then dynamic + queued + .components + .values() + .chain(queued.resources.values()) + .chain(queued.dynamic_registrations.iter()) + .find(|queued| queued.id == id) + .map(|queued| Cow::Owned(queued.descriptor.clone())) + }) + } + + /// Gets the name of the component with this [`ComponentId`] if it is present. + /// This will return `None` only if the id is neither regisered nor queued to be registered. + /// + /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. + #[inline] + pub fn get_name<'a>(&'a self, id: ComponentId) -> Option> { + self.components + .get(id.0) + .and_then(|info| { + info.as_ref() + .map(|info| Cow::Borrowed(info.descriptor.name())) + }) + .or_else(|| { + let queued = self.queued.read().unwrap_or_else(PoisonError::into_inner); + // first check components, then resources, then dynamic + queued + .components + .values() + .chain(queued.resources.values()) + .chain(queued.dynamic_registrations.iter()) + .find(|queued| queued.id == id) + .map(|queued| queued.descriptor.name.clone()) + }) } /// Gets the metadata associated with the given component. /// # Safety /// - /// `id` must be a valid [`ComponentId`] + /// `id` must be a valid and fully registered [`ComponentId`]. #[inline] pub unsafe fn get_info_unchecked(&self, id: ComponentId) -> &ComponentInfo { - debug_assert!(id.index() < self.components.len()); // SAFETY: The caller ensures `id` is valid. - unsafe { self.components.get_unchecked(id.0) } + unsafe { + self.components + .get(id.0) + .debug_checked_unwrap() + .as_ref() + .debug_checked_unwrap() + } } #[inline] pub(crate) fn get_hooks_mut(&mut self, id: ComponentId) -> Option<&mut ComponentHooks> { - self.components.get_mut(id.0).map(|info| &mut info.hooks) + self.components + .get_mut(id.0) + .and_then(|info| info.as_mut().map(|info| &mut info.hooks)) } #[inline] @@ -1286,7 +2127,7 @@ impl Components { ) -> Option<&mut RequiredComponents> { self.components .get_mut(id.0) - .map(|info| &mut info.required_components) + .and_then(|info| info.as_mut().map(|info| &mut info.required_components)) } /// Registers the given component `R` and [required components] inherited from it as required by `T`. @@ -1338,12 +2179,28 @@ impl Components { let required_by = unsafe { self.get_required_by_mut(required).debug_checked_unwrap() }; required_by.insert(requiree); + let mut required_components_tmp = RequiredComponents::default(); // SAFETY: The caller ensures that the `requiree` and `required` components are valid. - let inherited_requirements = - unsafe { self.register_inherited_required_components(requiree, required) }; + let inherited_requirements = unsafe { + self.register_inherited_required_components( + requiree, + required, + &mut required_components_tmp, + ) + }; + + // SAFETY: The caller ensures that the `requiree` is valid. 
+ let required_components = unsafe { + self.get_required_components_mut(requiree) + .debug_checked_unwrap() + }; + required_components.0.extend(required_components_tmp.0); // Propagate the new required components up the chain to all components that require the requiree. - if let Some(required_by) = self.get_required_by(requiree).cloned() { + if let Some(required_by) = self + .get_required_by(requiree) + .map(|set| set.iter().copied().collect::>()) + { // `required` is now required by anything that `requiree` was required by. self.get_required_by_mut(required) .unwrap() @@ -1369,10 +2226,10 @@ impl Components { // SAFETY: Component ID and constructor match the ones on the original requiree. // The original requiree is responsible for making sure the registration is safe. unsafe { - required_components.register_dynamic( + required_components.register_dynamic_with( *component_id, - component.constructor.clone(), component.inheritance_depth + depth + 1, + || component.constructor.clone(), ); }; } @@ -1392,6 +2249,7 @@ impl Components { &mut self, requiree: ComponentId, required: ComponentId, + required_components: &mut RequiredComponents, ) -> Vec<(ComponentId, RequiredComponent)> { // Get required components inherited from the `required` component. // SAFETY: The caller ensures that the `required` component is valid. @@ -1414,27 +2272,21 @@ impl Components { .collect(); // Register the new required components. - for (component_id, component) in inherited_requirements.iter().cloned() { - // SAFETY: The caller ensures that the `requiree` is valid. - let required_components = unsafe { - self.get_required_components_mut(requiree) - .debug_checked_unwrap() - }; - + for (component_id, component) in inherited_requirements.iter() { // Register the required component for the requiree. // SAFETY: Component ID and constructor match the ones on the original requiree. unsafe { - required_components.register_dynamic( - component_id, - component.constructor, + required_components.register_dynamic_with( + *component_id, component.inheritance_depth, + || component.constructor.clone(), ); }; // Add the requiree to the list of components that require the required component. // SAFETY: The caller ensures that the required components are valid. let required_by = unsafe { - self.get_required_by_mut(component_id) + self.get_required_by_mut(*component_id) .debug_checked_unwrap() }; required_by.insert(requiree); @@ -1443,48 +2295,6 @@ impl Components { inherited_requirements } - // NOTE: This should maybe be private, but it is currently public so that `bevy_ecs_macros` can use it. - // We can't directly move this there either, because this uses `Components::get_required_by_mut`, - // which is private, and could be equally risky to expose to users. - /// Registers the given component `R` and [required components] inherited from it as required by `T`, - /// and adds `T` to their lists of requirees. - /// - /// The given `inheritance_depth` determines how many levels of inheritance deep the requirement is. - /// A direct requirement has a depth of `0`, and each level of inheritance increases the depth by `1`. - /// Lower depths are more specific requirements, and can override existing less specific registrations. - /// - /// The `recursion_check_stack` allows checking whether this component tried to register itself as its - /// own (indirect) required component. - /// - /// This method does *not* register any components as required by components that require `T`. 
- /// - /// Only use this method if you know what you are doing. In most cases, you should instead use [`World::register_required_components`], - /// or the equivalent method in `bevy_app::App`. - /// - /// [required component]: Component#required-components - #[doc(hidden)] - pub fn register_required_components_manual( - &mut self, - required_components: &mut RequiredComponents, - constructor: fn() -> R, - inheritance_depth: u16, - recursion_check_stack: &mut Vec, - ) { - let requiree = self.register_component_internal::(recursion_check_stack); - let required = self.register_component_internal::(recursion_check_stack); - - // SAFETY: We just created the components. - unsafe { - self.register_required_components_manual_unchecked::( - requiree, - required, - required_components, - constructor, - inheritance_depth, - ); - } - } - /// Registers the given component `R` and [required components] inherited from it as required by `T`, /// and adds `T` to their lists of requirees. /// @@ -1521,31 +2331,14 @@ impl Components { let required_by = unsafe { self.get_required_by_mut(required).debug_checked_unwrap() }; required_by.insert(requiree); - // Register the inherited required components for the requiree. - let required: Vec<(ComponentId, RequiredComponent)> = self - .get_info(required) - .unwrap() - .required_components() - .0 - .iter() - .map(|(id, component)| (*id, component.clone())) - .collect(); - - for (id, component) in required { - // Register the inherited required components for the requiree. - // The inheritance depth is increased by `1` since this is a component required by the original required component. - required_components.register_dynamic( - id, - component.constructor.clone(), - component.inheritance_depth + 1, - ); - self.get_required_by_mut(id).unwrap().insert(requiree); - } + self.register_inherited_required_components(requiree, required, required_components); } #[inline] pub(crate) fn get_required_by(&self, id: ComponentId) -> Option<&HashSet> { - self.components.get(id.0).map(|info| &info.required_by) + self.components + .get(id.0) + .and_then(|info| info.as_ref().map(|info| &info.required_by)) } #[inline] @@ -1555,13 +2348,91 @@ impl Components { ) -> Option<&mut HashSet> { self.components .get_mut(id.0) - .map(|info| &mut info.required_by) + .and_then(|info| info.as_mut().map(|info| &mut info.required_by)) + } + + /// Returns true if the [`ComponentId`] is fully registered and valid. + /// Ids may be invalid if they are still queued to be registered. + /// Those ids are still correct, but they are not usable in every context yet. + #[inline] + pub fn is_id_valid(&self, id: ComponentId) -> bool { + self.components.get(id.0).is_some_and(Option::is_some) + } + + /// Type-erased equivalent of [`Components::valid_component_id()`]. + #[inline] + pub fn get_valid_id(&self, type_id: TypeId) -> Option { + self.indices.get(&type_id).copied() + } + + /// Returns the [`ComponentId`] of the given [`Component`] type `T` if it is fully registered. + /// If you want to include queued registration, see [`Components::component_id()`]. 
+ /// + /// ``` + /// use bevy_ecs::prelude::*; + /// + /// let mut world = World::new(); + /// + /// #[derive(Component)] + /// struct ComponentA; + /// + /// let component_a_id = world.register_component::(); + /// + /// assert_eq!(component_a_id, world.components().valid_component_id::().unwrap()) + /// ``` + /// + /// # See also + /// + /// * [`Components::get_valid_id()`] + /// * [`Components::valid_resource_id()`] + /// * [`World::component_id()`] + #[inline] + pub fn valid_component_id(&self) -> Option { + self.get_id(TypeId::of::()) + } + + /// Type-erased equivalent of [`Components::valid_resource_id()`]. + #[inline] + pub fn get_valid_resource_id(&self, type_id: TypeId) -> Option { + self.resource_indices.get(&type_id).copied() + } + + /// Returns the [`ComponentId`] of the given [`Resource`] type `T` if it is fully registered. + /// If you want to include queued registration, see [`Components::resource_id()`]. + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// + /// let mut world = World::new(); + /// + /// #[derive(Resource, Default)] + /// struct ResourceA; + /// + /// let resource_a_id = world.init_resource::(); + /// + /// assert_eq!(resource_a_id, world.components().valid_resource_id::().unwrap()) + /// ``` + /// + /// # See also + /// + /// * [`Components::valid_component_id()`] + /// * [`Components::get_resource_id()`] + #[inline] + pub fn valid_resource_id(&self) -> Option { + self.get_resource_id(TypeId::of::()) } /// Type-erased equivalent of [`Components::component_id()`]. #[inline] pub fn get_id(&self, type_id: TypeId) -> Option { - self.indices.get(&type_id).copied() + self.indices.get(&type_id).copied().or_else(|| { + self.queued + .read() + .unwrap_or_else(PoisonError::into_inner) + .components + .get(&type_id) + .map(|queued| queued.id) + }) } /// Returns the [`ComponentId`] of the given [`Component`] type `T`. @@ -1571,7 +2442,7 @@ impl Components { /// instance. /// /// Returns [`None`] if the `Component` type has not - /// yet been initialized using [`Components::register_component()`]. + /// yet been initialized using [`ComponentsRegistrator::register_component()`] or [`ComponentsQueuedRegistrator::queue_register_component()`]. /// /// ``` /// use bevy_ecs::prelude::*; @@ -1599,7 +2470,14 @@ impl Components { /// Type-erased equivalent of [`Components::resource_id()`]. #[inline] pub fn get_resource_id(&self, type_id: TypeId) -> Option { - self.resource_indices.get(&type_id).copied() + self.resource_indices.get(&type_id).copied().or_else(|| { + self.queued + .read() + .unwrap_or_else(PoisonError::into_inner) + .resources + .get(&type_id) + .map(|queued| queued.id) + }) } /// Returns the [`ComponentId`] of the given [`Resource`] type `T`. @@ -1609,7 +2487,7 @@ impl Components { /// instance. /// /// Returns [`None`] if the `Resource` type has not - /// yet been initialized using [`Components::register_resource()`]. + /// yet been initialized using [`ComponentsRegistrator::register_resource()`] or [`ComponentsQueuedRegistrator::queue_register_resource()`]. /// /// ``` /// use bevy_ecs::prelude::*; @@ -1633,84 +2511,29 @@ impl Components { self.get_resource_id(TypeId::of::()) } - /// Registers a [`Resource`] of type `T` with this instance. - /// If a resource of this type has already been registered, this will return - /// the ID of the pre-existing resource. 
- /// - /// # See also - /// - /// * [`Components::resource_id()`] - /// * [`Components::register_resource_with_descriptor()`] - #[inline] - pub fn register_resource(&mut self) -> ComponentId { - // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] - unsafe { - self.get_or_register_resource_with(TypeId::of::(), || { - ComponentDescriptor::new_resource::() - }) - } - } - - /// Registers a [`Resource`] described by `descriptor`. - /// - /// # Note - /// - /// If this method is called multiple times with identical descriptors, a distinct [`ComponentId`] - /// will be created for each one. - /// - /// # See also - /// - /// * [`Components::resource_id()`] - /// * [`Components::register_resource()`] - pub fn register_resource_with_descriptor( - &mut self, - descriptor: ComponentDescriptor, - ) -> ComponentId { - Components::register_resource_inner(&mut self.components, descriptor) - } - - /// Registers a [non-send resource](crate::system::NonSend) of type `T` with this instance. - /// If a resource of this type has already been registered, this will return - /// the ID of the pre-existing resource. - #[inline] - pub fn register_non_send(&mut self) -> ComponentId { - // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] - unsafe { - self.get_or_register_resource_with(TypeId::of::(), || { - ComponentDescriptor::new_non_send::(StorageType::default()) - }) - } - } - /// # Safety /// - /// The [`ComponentDescriptor`] must match the [`TypeId`] + /// The [`ComponentDescriptor`] must match the [`TypeId`]. + /// The [`ComponentId`] must be unique. + /// The [`TypeId`] and [`ComponentId`] must not be registered or queued. #[inline] - unsafe fn get_or_register_resource_with( + unsafe fn register_resource_unchecked( &mut self, type_id: TypeId, - func: impl FnOnce() -> ComponentDescriptor, - ) -> ComponentId { - let components = &mut self.components; - *self.resource_indices.entry(type_id).or_insert_with(|| { - let descriptor = func(); - Components::register_resource_inner(components, descriptor) - }) - } - - #[inline] - fn register_resource_inner( - components: &mut Vec, + component_id: ComponentId, descriptor: ComponentDescriptor, - ) -> ComponentId { - let component_id = ComponentId(components.len()); - components.push(ComponentInfo::new(component_id, descriptor)); - component_id + ) { + // SAFETY: ensured by caller + unsafe { + self.register_component_inner(component_id, descriptor); + } + let prev = self.resource_indices.insert(type_id, component_id); + debug_assert!(prev.is_none()); } - /// Gets an iterator over all components registered with this instance. - pub fn iter(&self) -> impl Iterator + '_ { - self.components.iter() + /// Gets an iterator over all components fully registered with this instance. + pub fn iter_registered(&self) -> impl Iterator + '_ { + self.components.iter().filter_map(Option::as_ref) } } @@ -1722,7 +2545,7 @@ impl Components { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] pub struct Tick { tick: u32, @@ -1819,7 +2642,7 @@ impl<'a> TickCells<'a> { /// Records when a component or resource was added and when it was last mutably dereferenced (or added). #[derive(Copy, Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct ComponentTicks { /// Tick recording the time this component or resource was added. 
pub added: Tick, @@ -1853,7 +2676,7 @@ impl ComponentTicks { /// Manually sets the change tick. /// - /// This is normally done automatically via the [`DerefMut`](std::ops::DerefMut) implementation + /// This is normally done automatically via the [`DerefMut`] implementation /// on [`Mut`](crate::change_detection::Mut), [`ResMut`](crate::change_detection::ResMut), etc. /// However, components and resources that make use of interior mutability might require manual updates. /// @@ -1894,7 +2717,7 @@ impl ComponentIdFor<'_, T> { } } -impl core::ops::Deref for ComponentIdFor<'_, T> { +impl Deref for ComponentIdFor<'_, T> { type Target = ComponentId; fn deref(&self) -> &Self::Target { &self.0.component_id @@ -1936,17 +2759,9 @@ pub enum RequiredComponentsError { } /// A Required Component constructor. See [`Component`] for details. -#[cfg(feature = "track_location")] #[derive(Clone)] pub struct RequiredComponentConstructor( - pub Arc)>, -); - -/// A Required Component constructor. See [`Component`] for details. -#[cfg(not(feature = "track_location"))] -#[derive(Clone)] -pub struct RequiredComponentConstructor( - pub Arc, + pub Arc, ); impl RequiredComponentConstructor { @@ -1966,17 +2781,9 @@ impl RequiredComponentConstructor { change_tick: Tick, table_row: TableRow, entity: Entity, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { - (self.0)( - table, - sparse_sets, - change_tick, - table_row, - entity, - #[cfg(feature = "track_location")] - caller, - ); + (self.0)(table, sparse_sets, change_tick, table_row, entity, caller); } } @@ -2025,25 +2832,30 @@ impl RequiredComponents { /// `constructor` _must_ initialize a component for `component_id` in such a way that /// matches the storage type of the component. It must only use the given `table_row` or `Entity` to /// initialize the storage for `component_id` corresponding to the given entity. - pub unsafe fn register_dynamic( + pub unsafe fn register_dynamic_with( &mut self, component_id: ComponentId, - constructor: RequiredComponentConstructor, inheritance_depth: u16, + constructor: impl FnOnce() -> RequiredComponentConstructor, ) { - self.0 - .entry(component_id) - .and_modify(|component| { - if component.inheritance_depth > inheritance_depth { - // New registration is more specific than existing requirement - component.constructor = constructor.clone(); - component.inheritance_depth = inheritance_depth; + let entry = self.0.entry(component_id); + match entry { + bevy_platform::collections::hash_map::Entry::Occupied(mut occupied) => { + let current = occupied.get_mut(); + if current.inheritance_depth > inheritance_depth { + *current = RequiredComponent { + constructor: constructor(), + inheritance_depth, + } } - }) - .or_insert(RequiredComponent { - constructor, - inheritance_depth, - }); + } + bevy_platform::collections::hash_map::Entry::Vacant(vacant) => { + vacant.insert(RequiredComponent { + constructor: constructor(), + inheritance_depth, + }); + } + } } /// Registers a required component. @@ -2052,7 +2864,7 @@ impl RequiredComponents { /// is smaller than the depth of the existing registration. Otherwise, the new registration will be ignored. 
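The inheritance-depth rule described just above is the part of this change that is easiest to misread, so here is a brief sketch (not part of the patch) of how it plays out with the value-style `#[require(...)]` syntax used in this PR's tests. The type names are made up for illustration; the behavior shown is the "shallower registration wins" rule from the doc comment above.

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
#[require(B, C(1))] // C is required directly by A: inheritance depth 0
struct A;

#[derive(Component, Default)]
#[require(C(5))] // C is required via B: depth 1 when reached from A
struct B;

#[derive(Component, PartialEq, Debug)]
struct C(u32);

fn main() {
    let mut world = World::new();
    let entity = world.spawn(A).id();
    // The direct (shallower) requirement wins over the one inherited through B.
    assert_eq!(world.get::<C>(entity), Some(&C(1)));
}
```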
pub fn register( &mut self, - components: &mut Components, + components: &mut ComponentsRegistrator, constructor: fn() -> C, inheritance_depth: u16, ) { @@ -2070,75 +2882,66 @@ impl RequiredComponents { constructor: fn() -> C, inheritance_depth: u16, ) { - let erased: RequiredComponentConstructor = RequiredComponentConstructor({ - // `portable-atomic-util` `Arc` is not able to coerce an unsized - // type like `std::sync::Arc` can. Creating a `Box` first does the - // coercion. - // - // This would be resolved by https://github.com/rust-lang/rust/issues/123430 + let erased = || { + RequiredComponentConstructor({ + // `portable-atomic-util` `Arc` is not able to coerce an unsized + // type like `std::sync::Arc` can. Creating a `Box` first does the + // coercion. + // + // This would be resolved by https://github.com/rust-lang/rust/issues/123430 - #[cfg(feature = "portable-atomic")] - use alloc::boxed::Box; + #[cfg(not(target_has_atomic = "ptr"))] + use alloc::boxed::Box; - #[cfg(feature = "track_location")] - type Constructor = dyn for<'a, 'b> Fn( - &'a mut Table, - &'b mut SparseSets, - Tick, - TableRow, - Entity, - &'static Location<'static>, - ); + type Constructor = dyn for<'a, 'b> Fn( + &'a mut Table, + &'b mut SparseSets, + Tick, + TableRow, + Entity, + MaybeLocation, + ); - #[cfg(not(feature = "track_location"))] - type Constructor = - dyn for<'a, 'b> Fn(&'a mut Table, &'b mut SparseSets, Tick, TableRow, Entity); + #[cfg(not(target_has_atomic = "ptr"))] + type Intermediate = Box; - #[cfg(feature = "portable-atomic")] - type Intermediate = Box; + #[cfg(target_has_atomic = "ptr")] + type Intermediate = Arc; - #[cfg(not(feature = "portable-atomic"))] - type Intermediate = Arc; + let boxed: Intermediate = Intermediate::new( + move |table, sparse_sets, change_tick, table_row, entity, caller| { + OwningPtr::make(constructor(), |ptr| { + // SAFETY: This will only be called in the context of `BundleInfo::write_components`, which will + // pass in a valid table_row and entity requiring a C constructor + // C::STORAGE_TYPE is the storage type associated with `component_id` / `C` + // `ptr` points to valid `C` data, which matches the type associated with `component_id` + unsafe { + BundleInfo::initialize_required_component( + table, + sparse_sets, + change_tick, + table_row, + entity, + component_id, + C::STORAGE_TYPE, + ptr, + caller, + ); + } + }); + }, + ); - let boxed: Intermediate = Intermediate::new( - move |table, - sparse_sets, - change_tick, - table_row, - entity, - #[cfg(feature = "track_location")] caller| { - OwningPtr::make(constructor(), |ptr| { - // SAFETY: This will only be called in the context of `BundleInfo::write_components`, which will - // pass in a valid table_row and entity requiring a C constructor - // C::STORAGE_TYPE is the storage type associated with `component_id` / `C` - // `ptr` points to valid `C` data, which matches the type associated with `component_id` - unsafe { - BundleInfo::initialize_required_component( - table, - sparse_sets, - change_tick, - table_row, - entity, - component_id, - C::STORAGE_TYPE, - ptr, - #[cfg(feature = "track_location")] - caller, - ); - } - }); - }, - ); - - Arc::from(boxed) - }); + Arc::from(boxed) + }) + }; // SAFETY: // `component_id` matches the type initialized by the `erased` constructor above. // `erased` initializes a component for `component_id` in such a way that // matches the storage type of the component. It only uses the given `table_row` or `Entity` to // initialize the storage corresponding to the given entity. 
- unsafe { self.register_dynamic(component_id, erased, inheritance_depth) }; + unsafe { self.register_dynamic_with(component_id, inheritance_depth, erased) }; } /// Iterates the ids of all required components. This includes recursive required components. @@ -2156,11 +2959,26 @@ impl RequiredComponents { } } - // Merges `required_components` into this collection. This only inserts a required component - // if it _did not already exist_. + /// Merges `required_components` into this collection. This only inserts a required component + /// if it _did not already exist_ *or* if the required component is more specific than the existing one + /// (in other words, if the inheritance depth is smaller). + /// + /// See [`register_dynamic_with`](Self::register_dynamic_with) for details. pub(crate) fn merge(&mut self, required_components: &RequiredComponents) { - for (id, constructor) in &required_components.0 { - self.0.entry(*id).or_insert_with(|| constructor.clone()); + for ( + component_id, + RequiredComponent { + constructor, + inheritance_depth, + }, + ) in required_components.0.iter() + { + // SAFETY: This exact registration must have been done on `required_components`, so safety is ensured by that caller. + unsafe { + self.register_dynamic_with(*component_id, *inheritance_depth, || { + constructor.clone() + }); + } } } } @@ -2183,13 +3001,13 @@ pub fn enforce_no_required_components_recursion( "Recursive required components detected: {}\nhelp: {}", recursion_check_stack .iter() - .map(|id| format!("{}", ShortName(components.get_name(*id).unwrap()))) + .map(|id| format!("{}", ShortName(&components.get_name(*id).unwrap()))) .collect::>() .join(" → "), if direct_recursion { format!( "Remove require({}).", - ShortName(components.get_name(requiree).unwrap()) + ShortName(&components.get_name(requiree).unwrap()) ) } else { "If this is intentional, consider merging the components.".into() @@ -2204,10 +3022,10 @@ pub fn enforce_no_required_components_recursion( /// It will panic if set as handler for any other component. /// pub fn component_clone_via_clone( - _commands: &mut Commands, + source: &SourceComponent, ctx: &mut ComponentCloneCtx, ) { - if let Some(component) = ctx.read_source_component::() { + if let Some(component) = source.read::() { ctx.write_target_component(component.clone()); } } @@ -2221,24 +3039,39 @@ pub fn component_clone_via_clone( /// - Component has [`TypeId`] /// - Component is registered /// - Component has [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr) registered -/// - Component has one of the following registered: [`ReflectFromReflect`](bevy_reflect::ReflectFromReflect), +/// - Component can be cloned via [`PartialReflect::reflect_clone`] _or_ has one of the following registered: [`ReflectFromReflect`](bevy_reflect::ReflectFromReflect), /// [`ReflectDefault`](bevy_reflect::std_traits::ReflectDefault), [`ReflectFromWorld`](crate::reflect::ReflectFromWorld) /// /// If any of the conditions is not satisfied, the component will be skipped. /// /// See [`EntityClonerBuilder`](crate::entity::EntityClonerBuilder) for details. 
+/// +/// [`PartialReflect::reflect_clone`]: bevy_reflect::PartialReflect::reflect_clone #[cfg(feature = "bevy_reflect")] -pub fn component_clone_via_reflect(commands: &mut Commands, ctx: &mut ComponentCloneCtx) { +pub fn component_clone_via_reflect(source: &SourceComponent, ctx: &mut ComponentCloneCtx) { let Some(app_registry) = ctx.type_registry().cloned() else { return; }; - let Some(source_component_reflect) = ctx.read_source_component_reflect() else { + let registry = app_registry.read(); + let Some(source_component_reflect) = source.read_reflect(®istry) else { return; }; let component_info = ctx.component_info(); // checked in read_source_component_reflect let type_id = component_info.type_id().unwrap(); - let registry = app_registry.read(); + + // Try to clone using `reflect_clone` + if let Ok(mut component) = source_component_reflect.reflect_clone() { + if let Some(reflect_component) = + registry.get_type_data::(type_id) + { + reflect_component.map_entities(&mut *component, ctx.entity_mapper()); + } + drop(registry); + + ctx.write_target_component_reflect(component); + return; + } // Try to clone using ReflectFromReflect if let Some(reflect_from_reflect) = @@ -2250,9 +3083,7 @@ pub fn component_clone_via_reflect(commands: &mut Commands, ctx: &mut ComponentC if let Some(reflect_component) = registry.get_type_data::(type_id) { - reflect_component.visit_entities_mut(&mut *component, &mut |entity| { - *entity = ctx.entity_mapper().get_mapped(*entity); - }); + reflect_component.map_entities(&mut *component, ctx.entity_mapper()); } drop(registry); @@ -2275,23 +3106,12 @@ pub fn component_clone_via_reflect(commands: &mut Commands, ctx: &mut ComponentC registry.get_type_data::(type_id) { let reflect_from_world = reflect_from_world.clone(); - let mut mapped_entities = Vec::new(); - if let Some(reflect_component) = - registry.get_type_data::(type_id) - { - reflect_component.visit_entities(source_component_reflect, &mut |entity| { - mapped_entities.push(entity); - }); - } - let source_component_cloned = source_component_reflect.clone_value(); + let source_component_cloned = source_component_reflect.to_dynamic(); let component_layout = component_info.layout(); let target = ctx.target(); let component_id = ctx.component_id(); - for entity in mapped_entities.iter_mut() { - *entity = ctx.entity_mapper().get_mapped(*entity); - } drop(registry); - commands.queue(move |world: &mut World| { + ctx.queue_deferred(move |world: &mut World, mapper: &mut dyn EntityMapper| { let mut component = reflect_from_world.from_world(world); assert_eq!(type_id, (*component).type_id()); component.apply(source_component_cloned.as_partial_reflect()); @@ -2299,11 +3119,7 @@ pub fn component_clone_via_reflect(commands: &mut Commands, ctx: &mut ComponentC .read() .get_type_data::(type_id) { - let mut i = 0; - reflect_component.visit_entities_mut(&mut *component, &mut |entity| { - *entity = mapped_entities[i]; - i += 1; - }); + reflect_component.map_entities(&mut *component, mapper); } // SAFETY: // - component_id is from the same world as target entity @@ -2314,7 +3130,11 @@ pub fn component_clone_via_reflect(commands: &mut Commands, ctx: &mut ComponentC world .entity_mut(target) .insert_by_id(component_id, OwningPtr::new(raw_component_ptr)); - alloc::alloc::dealloc(raw_component_ptr.as_ptr(), component_layout); + + if component_layout.size() > 0 { + // Ensure we don't attempt to deallocate zero-sized components + alloc::alloc::dealloc(raw_component_ptr.as_ptr(), component_layout); + } } }); } @@ -2323,7 +3143,7 @@ pub fn 
component_clone_via_reflect(commands: &mut Commands, ctx: &mut ComponentC /// Noop implementation of component clone handler function. /// /// See [`EntityClonerBuilder`](crate::entity::EntityClonerBuilder) for details. -pub fn component_clone_ignore(_commands: &mut Commands, _ctx: &mut ComponentCloneCtx) {} +pub fn component_clone_ignore(_source: &SourceComponent, _ctx: &mut ComponentCloneCtx) {} /// Wrapper for components clone specialization using autoderef. #[doc(hidden)] diff --git a/crates/bevy_ecs/src/entity/clone_entities.rs b/crates/bevy_ecs/src/entity/clone_entities.rs index 389f30ff8d..bd8eb2b4bd 100644 --- a/crates/bevy_ecs/src/entity/clone_entities.rs +++ b/crates/bevy_ecs/src/entity/clone_entities.rs @@ -1,25 +1,70 @@ -use alloc::{borrow::ToOwned, vec::Vec}; -use bevy_platform_support::collections::{HashMap, HashSet}; +use alloc::{borrow::ToOwned, boxed::Box, collections::VecDeque, vec::Vec}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_ptr::{Ptr, PtrMut}; use bumpalo::Bump; -use core::{any::TypeId, ptr::NonNull}; +use core::any::TypeId; -#[cfg(feature = "bevy_reflect")] -use alloc::boxed::Box; - -use crate::component::{ComponentCloneBehavior, ComponentCloneFn}; -use crate::entity::hash_map::EntityHashMap; -use crate::entity::EntityMapper; -use crate::system::Commands; use crate::{ bundle::Bundle, - component::{Component, ComponentId, ComponentInfo, Components}, - entity::Entity, + component::{Component, ComponentCloneBehavior, ComponentCloneFn, ComponentId, ComponentInfo}, + entity::{hash_map::EntityHashMap, Entities, Entity, EntityMapper}, query::DebugCheckedUnwrap, + relationship::RelationshipHookMode, world::World, }; -use alloc::collections::VecDeque; -use core::cell::RefCell; + +/// Provides read access to the source component (the component being cloned) in a [`ComponentCloneFn`]. +pub struct SourceComponent<'a> { + ptr: Ptr<'a>, + info: &'a ComponentInfo, +} + +impl<'a> SourceComponent<'a> { + /// Returns a reference to the component on the source entity. + /// + /// Will return `None` if `ComponentId` of requested component does not match `ComponentId` of source component + pub fn read(&self) -> Option<&C> { + if self + .info + .type_id() + .is_some_and(|id| id == TypeId::of::()) + { + // SAFETY: + // - Components and ComponentId are from the same world + // - source_component_ptr holds valid data of the type referenced by ComponentId + unsafe { Some(self.ptr.deref::()) } + } else { + None + } + } + + /// Returns the "raw" pointer to the source component. + pub fn ptr(&self) -> Ptr<'a> { + self.ptr + } + + /// Returns a reference to the component on the source entity as [`&dyn Reflect`](bevy_reflect::Reflect). + /// + /// Will return `None` if: + /// - World does not have [`AppTypeRegistry`](`crate::reflect::AppTypeRegistry`). + /// - Component does not implement [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr). + /// - Component is not registered. + /// - Component does not have [`TypeId`] + /// - Registered [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr)'s [`TypeId`] does not match component's [`TypeId`] + #[cfg(feature = "bevy_reflect")] + pub fn read_reflect( + &self, + registry: &bevy_reflect::TypeRegistry, + ) -> Option<&dyn bevy_reflect::Reflect> { + let type_id = self.info.type_id()?; + let reflect_from_ptr = registry.get_type_data::(type_id)?; + if reflect_from_ptr.type_id() != type_id { + return None; + } + // SAFETY: `source_component_ptr` stores data represented by `component_id`, which we used to get `ReflectFromPtr`. 
+ unsafe { Some(reflect_from_ptr.as_reflect(self.ptr)) } + } +} /// Context for component clone handlers. /// @@ -27,20 +72,19 @@ use core::cell::RefCell; /// and allows component clone handler to get information about component being cloned. pub struct ComponentCloneCtx<'a, 'b> { component_id: ComponentId, - source_component_ptr: Ptr<'a>, target_component_written: bool, bundle_scratch: &'a mut BundleScratch<'b>, bundle_scratch_allocator: &'b Bump, + entities: &'a Entities, source: Entity, target: Entity, - components: &'a Components, component_info: &'a ComponentInfo, entity_cloner: &'a mut EntityCloner, mapper: &'a mut dyn EntityMapper, #[cfg(feature = "bevy_reflect")] type_registry: Option<&'a crate::reflect::AppTypeRegistry>, #[cfg(not(feature = "bevy_reflect"))] - #[expect(dead_code)] + #[expect(dead_code, reason = "type_registry is only used with bevy_reflect")] type_registry: Option<&'a ()>, } @@ -49,16 +93,16 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { /// /// # Safety /// Caller must ensure that: - /// - `components` and `component_id` are from the same world. + /// - `component_info` corresponds to the `component_id` in the same world,. /// - `source_component_ptr` points to a valid component of type represented by `component_id`. unsafe fn new( component_id: ComponentId, source: Entity, target: Entity, - source_component_ptr: Ptr<'a>, bundle_scratch_allocator: &'b Bump, bundle_scratch: &'a mut BundleScratch<'b>, - components: &'a Components, + entities: &'a Entities, + component_info: &'a ComponentInfo, entity_cloner: &'a mut EntityCloner, mapper: &'a mut dyn EntityMapper, #[cfg(feature = "bevy_reflect")] type_registry: Option<&'a crate::reflect::AppTypeRegistry>, @@ -68,13 +112,12 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { component_id, source, target, - source_component_ptr, bundle_scratch, target_component_written: false, bundle_scratch_allocator, - components, + entities, mapper, - component_info: components.get_info_unchecked(component_id), + component_info, entity_cloner, type_registry, } @@ -109,8 +152,8 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { /// entities stored in a cloned entity's [`RelationshipTarget`](crate::relationship::RelationshipTarget) component with /// [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN) will also be cloned. #[inline] - pub fn is_recursive(&self) -> bool { - self.entity_cloner.is_recursive + pub fn linked_cloning(&self) -> bool { + self.entity_cloner.linked_cloning } /// Returns this context's [`EntityMapper`]. @@ -118,44 +161,6 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { self.mapper } - /// Returns a reference to the component on the source entity. - /// - /// Will return `None` if `ComponentId` of requested component does not match `ComponentId` of source component - pub fn read_source_component(&self) -> Option<&T> { - if self - .component_info - .type_id() - .is_some_and(|id| id == TypeId::of::()) - { - // SAFETY: - // - Components and ComponentId are from the same world - // - source_component_ptr holds valid data of the type referenced by ComponentId - unsafe { Some(self.source_component_ptr.deref::()) } - } else { - None - } - } - - /// Returns a reference to the component on the source entity as [`&dyn Reflect`](bevy_reflect::Reflect). - /// - /// Will return `None` if: - /// - World does not have [`AppTypeRegistry`](`crate::reflect::AppTypeRegistry`). - /// - Component does not implement [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr). - /// - Component is not registered. 
- /// - Component does not have [`TypeId`] - /// - Registered [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr)'s [`TypeId`] does not match component's [`TypeId`] - #[cfg(feature = "bevy_reflect")] - pub fn read_source_component_reflect(&self) -> Option<&dyn bevy_reflect::Reflect> { - let registry = self.type_registry?.read(); - let type_id = self.component_info.type_id()?; - let reflect_from_ptr = registry.get_type_data::(type_id)?; - if reflect_from_ptr.type_id() != type_id { - return None; - } - // SAFETY: `source_component_ptr` stores data represented by `component_id`, which we used to get `ReflectFromPtr`. - unsafe { Some(reflect_from_ptr.as_reflect(self.source_component_ptr)) } - } - /// Writes component data to target entity. /// /// # Panics @@ -164,9 +169,7 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { /// - Component being written is not registered in the world. /// - `ComponentId` of component being written does not match expected `ComponentId`. pub fn write_target_component(&mut self, mut component: C) { - C::visit_entities_mut(&mut component, |entity| { - *entity = self.mapper.get_mapped(*entity); - }); + C::map_entities(&mut component, &mut self.mapper); let short_name = disqualified::ShortName::of::(); if self.target_component_written { panic!("Trying to write component '{short_name}' multiple times") @@ -186,33 +189,24 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { self.target_component_written = true; } - /// Writes component data to target entity by providing a pointer to source component data and a pointer to uninitialized target component data. - /// - /// This method allows caller to provide a function (`clone_fn`) to clone component using untyped pointers. - /// First argument to `clone_fn` points to source component data ([`Ptr`]), second argument points to uninitialized buffer ([`NonNull`]) allocated with layout - /// described by [`ComponentInfo`] stored in this [`ComponentCloneCtx`]. If cloning is successful and uninitialized buffer contains a valid clone of - /// source component, `clone_fn` should return `true`, otherwise it should return `false`. + /// Writes component data to target entity by providing a pointer to source component data. /// /// # Safety - /// Caller must ensure that if `clone_fn` is called and returns `true`, the second argument ([`NonNull`] pointer) points to a valid component data - /// described by [`ComponentInfo`] stored in this [`ComponentCloneCtx`]. + /// Caller must ensure that the passed in `ptr` references data that corresponds to the type of the source / target [`ComponentId`]. + /// `ptr` must also contain data that the written component can "own" (for example, this should not directly copy non-Copy data). + /// /// # Panics /// This will panic if component has already been written once. 
- pub unsafe fn write_target_component_ptr( - &mut self, - clone_fn: impl FnOnce(Ptr, NonNull) -> bool, - ) { + pub unsafe fn write_target_component_ptr(&mut self, ptr: Ptr) { if self.target_component_written { panic!("Trying to write component multiple times") } let layout = self.component_info.layout(); - let target_component_data_ptr = self.bundle_scratch_allocator.alloc_layout(layout); - - if clone_fn(self.source_component_ptr, target_component_data_ptr) { - self.bundle_scratch - .push_ptr(self.component_id, PtrMut::new(target_component_data_ptr)); - self.target_component_written = true; - } + let target_ptr = self.bundle_scratch_allocator.alloc_layout(layout); + core::ptr::copy_nonoverlapping(ptr.as_ptr(), target_ptr.as_ptr(), layout.size()); + self.bundle_scratch + .push_ptr(self.component_id, PtrMut::new(target_ptr)); + self.target_component_written = true; } /// Writes component data to target entity. @@ -253,17 +247,16 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { ); self.bundle_scratch .push_ptr(self.component_id, PtrMut::new(target_component_data_ptr)); - alloc::alloc::dealloc(component_data_ptr, component_layout); + + if component_layout.size() > 0 { + // Ensure we don't attempt to deallocate zero-sized components + alloc::alloc::dealloc(component_data_ptr, component_layout); + } } self.target_component_written = true; } - /// Returns instance of [`Components`]. - pub fn components(&self) -> &Components { - self.components - } - /// Returns [`AppTypeRegistry`](`crate::reflect::AppTypeRegistry`) if it exists in the world. /// /// NOTE: Prefer this method instead of manually reading the resource from the world. @@ -273,11 +266,21 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { } /// Queues the `entity` to be cloned by the current [`EntityCloner`] - pub fn queue_entity_clone(&self, entity: Entity) { + pub fn queue_entity_clone(&mut self, entity: Entity) { + let target = self.entities.reserve_entity(); + self.mapper.set_mapped(entity, target); + self.entity_cloner.clone_queue.push_back(entity); + } + + /// Queues a deferred clone operation, which will run with exclusive [`World`] access immediately after calling the clone handler for each component on an entity. + /// This exists, despite its similarity to [`Commands`](crate::system::Commands), to provide access to the entity mapper in the current context. + pub fn queue_deferred( + &mut self, + deferred: impl FnOnce(&mut World, &mut dyn EntityMapper) + 'static, + ) { self.entity_cloner - .clone_queue - .borrow_mut() - .push_back(entity); + .deferred_commands + .push_back(Box::new(deferred)); } } @@ -340,27 +343,28 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { /// 2. component-defined handler using [`Component::clone_behavior`] /// 3. default handler override using [`EntityClonerBuilder::with_default_clone_fn`]. /// 4. reflect-based or noop default clone handler depending on if `bevy_reflect` feature is enabled or not. 
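Since the precedence list above interacts with the new clone-handler signature introduced by this diff (`fn(&SourceComponent, &mut ComponentCloneCtx)`, replacing the old `Commands`-based one), a minimal sketch of a handler in the new style may help. It mirrors `component_clone_via_clone` from this patch; the `Health` component and the module paths are assumptions for illustration, and `EntityClonerBuilder::with_default_clone_fn` (item 3 above) is assumed to accept any `ComponentCloneFn`.

```rust
use bevy_ecs::prelude::*;
// Exact re-export paths may differ; these items are defined in this diff.
use bevy_ecs::entity::{ComponentCloneCtx, EntityCloner, SourceComponent};

#[derive(Component, Clone, PartialEq, Debug)]
struct Health(u32); // hypothetical component used for illustration

// A clone handler in the new signature: read the source component (if the
// requested type matches the component being cloned) and queue a clone for
// the target entity.
fn clone_health(source: &SourceComponent, ctx: &mut ComponentCloneCtx) {
    if let Some(health) = source.read::<Health>() {
        ctx.write_target_component(health.clone());
    }
}

fn main() {
    let mut world = World::default();
    let source = world.spawn(Health(10)).id();
    let target = world.spawn_empty().id();

    EntityCloner::build(&mut world)
        // Install the handler as the fallback used when no more specific
        // behavior applies (item 3 in the precedence list above).
        .with_default_clone_fn(clone_health)
        .clone_entity(source, target);

    assert_eq!(world.get::<Health>(target), Some(&Health(10)));
}
```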
-#[derive(Debug)] pub struct EntityCloner { filter_allows_components: bool, filter: HashSet, clone_behavior_overrides: HashMap, move_components: bool, - is_recursive: bool, + linked_cloning: bool, default_clone_fn: ComponentCloneFn, - clone_queue: RefCell>, + clone_queue: VecDeque, + deferred_commands: VecDeque>, } impl Default for EntityCloner { fn default() -> Self { Self { filter_allows_components: false, + move_components: false, + linked_cloning: false, + default_clone_fn: ComponentCloneBehavior::global_default_fn(), filter: Default::default(), clone_behavior_overrides: Default::default(), - move_components: false, - is_recursive: false, - default_clone_fn: ComponentCloneBehavior::global_default_fn(), clone_queue: Default::default(), + deferred_commands: Default::default(), } } } @@ -410,14 +414,20 @@ impl<'a> BundleScratch<'a> { /// /// # Safety /// All [`ComponentId`] values in this instance must come from `world`. - pub(crate) unsafe fn write(self, world: &mut World, entity: Entity) { + pub(crate) unsafe fn write( + self, + world: &mut World, + entity: Entity, + relationship_hook_insert_mode: RelationshipHookMode, + ) { // SAFETY: // - All `component_ids` are from the same world as `target` entity // - All `component_data_ptrs` are valid types represented by `component_ids` unsafe { - world.entity_mut(entity).insert_by_ids( + world.entity_mut(entity).insert_by_ids_internal( &self.component_ids, self.component_ptrs.into_iter().map(|ptr| ptr.promote()), + relationship_hook_insert_mode, ); } } @@ -433,10 +443,11 @@ impl EntityCloner { } } - /// Returns `true` if this cloner is configured to clone entities recursively. + /// Returns `true` if this cloner is configured to clone entities referenced in cloned components via [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN). + /// This will produce "deep" / recursive clones of relationship trees that have "linked spawn". #[inline] - pub fn is_recursive(&self) -> bool { - self.is_recursive + pub fn linked_cloning(&self) -> bool { + self.linked_cloning } /// Clones and inserts components from the `source` entity into the entity mapped by `mapper` from `source` using the stored configuration. @@ -445,6 +456,7 @@ impl EntityCloner { world: &mut World, source: Entity, mapper: &mut dyn EntityMapper, + relationship_hook_insert_mode: RelationshipHookMode, ) -> Entity { let target = mapper.get_mapped(source); // PERF: reusing allocated space across clones would be more efficient. Consider an allocation model similar to `Commands`. @@ -467,10 +479,6 @@ impl EntityCloner { let archetype = source_entity.archetype(); bundle_scratch = BundleScratch::with_capacity(archetype.component_count()); - // SAFETY: no other references to command queue exist - let mut commands = unsafe { - Commands::new_raw_from_entities(world.get_raw_command_queue(), world.entities()) - }; for component in archetype.components() { if !self.is_cloning_allowed(&component) { @@ -486,12 +494,20 @@ impl EntityCloner { .unwrap_or(self.default_clone_fn), }; + // SAFETY: This component exists because it is present on the archetype. + let info = unsafe { world.components().get_info_unchecked(component) }; + // SAFETY: // - There are no other mutable references to source entity. 
// - `component` is from `source_entity`'s archetype let source_component_ptr = unsafe { source_entity.get_by_id(component).debug_checked_unwrap() }; + let source_component = SourceComponent { + info, + ptr: source_component_ptr, + }; + // SAFETY: // - `components` and `component` are from the same world // - `source_component_ptr` is valid and points to the same type as represented by `component` @@ -500,22 +516,26 @@ impl EntityCloner { component, source, target, - source_component_ptr, &bundle_scratch_allocator, &mut bundle_scratch, - world.components(), + world.entities(), + info, self, mapper, app_registry.as_ref(), ) }; - (handler)(&mut commands, &mut ctx); + (handler)(&source_component, &mut ctx); } } world.flush(); + for deferred in self.deferred_commands.drain(..) { + (deferred)(world, mapper); + } + if !world.entities.contains(target) { panic!("Target entity does not exist"); } @@ -529,12 +549,12 @@ impl EntityCloner { // SAFETY: // - All `component_ids` are from the same world as `target` entity // - All `component_data_ptrs` are valid types represented by `component_ids` - unsafe { bundle_scratch.write(world, target) }; + unsafe { bundle_scratch.write(world, target, relationship_hook_insert_mode) }; target } /// Clones and inserts components from the `source` entity into `target` entity using the stored configuration. - /// If this [`EntityCloner`] has [`EntityCloner::is_recursive`], then it will recursively spawn entities as defined + /// If this [`EntityCloner`] has [`EntityCloner::linked_cloning`], then it will recursively spawn entities as defined /// by [`RelationshipTarget`](crate::relationship::RelationshipTarget) components with /// [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN) #[track_caller] @@ -545,7 +565,7 @@ impl EntityCloner { } /// Clones and inserts components from the `source` entity into a newly spawned entity using the stored configuration. - /// If this [`EntityCloner`] has [`EntityCloner::is_recursive`], then it will recursively spawn entities as defined + /// If this [`EntityCloner`] has [`EntityCloner::linked_cloning`], then it will recursively spawn entities as defined /// by [`RelationshipTarget`](crate::relationship::RelationshipTarget) components with /// [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN) #[track_caller] @@ -563,13 +583,21 @@ impl EntityCloner { source: Entity, mapper: &mut dyn EntityMapper, ) -> Entity { - let target = self.clone_entity_internal(world, source, mapper); + // All relationships on the root should have their hooks run + let target = self.clone_entity_internal(world, source, mapper, RelationshipHookMode::Run); + let child_hook_insert_mode = if self.linked_cloning { + // When spawning "linked relationships", we want to ignore hooks for relationships we are spawning, while + // still registering with original relationship targets that are "not linked" to the current recursive spawn. + RelationshipHookMode::RunIfNotLinked + } else { + // If we are not cloning "linked relationships" recursively, then we want any cloned relationship components to + // register themselves with their original relationship target. 
+ RelationshipHookMode::Run + }; loop { - let queued = self.clone_queue.borrow_mut().pop_front(); + let queued = self.clone_queue.pop_front(); if let Some(queued) = queued { - let target = world.entities.reserve_entity(); - mapper.set_mapped(queued, target); - self.clone_entity_internal(world, queued, mapper); + self.clone_entity_internal(world, queued, mapper, child_hook_insert_mode); } else { break; } @@ -584,7 +612,6 @@ impl EntityCloner { } /// A builder for configuring [`EntityCloner`]. See [`EntityCloner`] for more information. -#[derive(Debug)] pub struct EntityClonerBuilder<'w> { world: &'w mut World, entity_cloner: EntityCloner, @@ -609,7 +636,7 @@ impl<'w> EntityClonerBuilder<'w> { /// will not involve required components. pub fn without_required_components( &mut self, - builder: impl FnOnce(&mut EntityClonerBuilder) + Send + Sync + 'static, + builder: impl FnOnce(&mut EntityClonerBuilder), ) -> &mut Self { self.attach_required_components = false; builder(self); @@ -764,10 +791,10 @@ impl<'w> EntityClonerBuilder<'w> { self } - /// If `true`, makes the built [`EntityCloner`] recursively clone entities, as defined by - /// [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN). - pub fn recursive(&mut self, is_recursive: bool) -> &mut Self { - self.entity_cloner.is_recursive = is_recursive; + /// When true this cloner will be configured to clone entities referenced in cloned components via [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN). + /// This will produce "deep" / recursive clones of relationship trees that have "linked spawn". + pub fn linked_cloning(&mut self, linked_cloning: bool) -> &mut Self { + self.entity_cloner.linked_cloning = linked_cloning; self } @@ -817,17 +844,15 @@ mod tests { use super::ComponentCloneCtx; use crate::{ component::{Component, ComponentCloneBehavior, ComponentDescriptor, StorageType}, - entity::{hash_map::EntityHashMap, Entity, EntityCloner}, + entity::{Entity, EntityCloner, EntityHashMap, SourceComponent}, prelude::{ChildOf, Children, Resource}, - reflect::AppTypeRegistry, - reflect::{ReflectComponent, ReflectFromWorld}, - system::Commands, + reflect::{AppTypeRegistry, ReflectComponent, ReflectFromWorld}, world::{FromWorld, World}, }; use alloc::vec::Vec; - use bevy_ecs_macros::require; use bevy_ptr::OwningPtr; use bevy_reflect::Reflect; + use core::marker::PhantomData; use core::{alloc::Layout, ops::Deref}; #[cfg(feature = "bevy_reflect")] @@ -835,9 +860,8 @@ mod tests { use super::*; use crate::{ component::{Component, ComponentCloneBehavior}, - entity::EntityCloner, + entity::{EntityCloner, SourceComponent}, reflect::{AppTypeRegistry, ReflectComponent, ReflectFromWorld}, - system::Commands, }; use alloc::vec; use bevy_reflect::{std_traits::ReflectDefault, FromType, Reflect, ReflectFromPtr}; @@ -868,67 +892,95 @@ mod tests { assert!(world.get::(e_clone).is_some_and(|c| *c == component)); } - // TODO: remove this when https://github.com/bevyengine/bevy/pull/13432 lands #[test] fn clone_entity_using_reflect_all_paths() { - // `ReflectDefault`-based fast path + #[derive(PartialEq, Eq, Default, Debug)] + struct NotClone; + + // `reflect_clone`-based fast path #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] - #[reflect(Default)] #[reflect(from_reflect = false)] struct A { field: usize, field2: Vec, } - // `ReflectFromReflect`-based fast path + // `ReflectDefault`-based fast path #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] + 
#[reflect(Default)] + #[reflect(from_reflect = false)] struct B { field: usize, field2: Vec, + #[reflect(ignore)] + ignored: NotClone, + } + + // `ReflectFromReflect`-based fast path + #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] + struct C { + field: usize, + field2: Vec, + #[reflect(ignore)] + ignored: NotClone, } // `ReflectFromWorld`-based fast path #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] #[reflect(FromWorld)] #[reflect(from_reflect = false)] - struct C { + struct D { field: usize, field2: Vec, + #[reflect(ignore)] + ignored: NotClone, } let mut world = World::default(); world.init_resource::(); let registry = world.get_resource::().unwrap(); - registry.write().register::<(A, B, C)>(); + registry.write().register::<(A, B, C, D)>(); let a_id = world.register_component::(); let b_id = world.register_component::(); let c_id = world.register_component::(); + let d_id = world.register_component::(); let component_a = A { field: 5, field2: vec![1, 2, 3, 4, 5], }; let component_b = B { - field: 6, + field: 5, field2: vec![1, 2, 3, 4, 5], + ignored: NotClone, }; let component_c = C { + field: 6, + field2: vec![1, 2, 3, 4, 5], + ignored: NotClone, + }; + let component_d = D { field: 7, field2: vec![1, 2, 3, 4, 5], + ignored: NotClone, }; - let e = world.spawn((component_a, component_b, component_c)).id(); + let e = world + .spawn((component_a, component_b, component_c, component_d)) + .id(); let e_clone = world.spawn_empty().id(); EntityCloner::build(&mut world) .override_clone_behavior_with_id(a_id, ComponentCloneBehavior::reflect()) .override_clone_behavior_with_id(b_id, ComponentCloneBehavior::reflect()) .override_clone_behavior_with_id(c_id, ComponentCloneBehavior::reflect()) + .override_clone_behavior_with_id(d_id, ComponentCloneBehavior::reflect()) .clone_entity(e, e_clone); assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); + assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); } #[test] @@ -939,8 +991,9 @@ mod tests { #[derive(Component, Reflect)] struct B; - fn test_handler(_commands: &mut Commands, ctx: &mut ComponentCloneCtx) { - assert!(ctx.read_source_component_reflect().is_none()); + fn test_handler(source: &SourceComponent, ctx: &mut ComponentCloneCtx) { + let registry = ctx.type_registry().unwrap(); + assert!(source.read_reflect(®istry.read()).is_none()); } let mut world = World::default(); @@ -1000,16 +1053,16 @@ mod tests { #[derive(Component, PartialEq, Eq, Default, Debug)] struct A; - // No valid type data + // No valid type data and not `reflect_clone`-able #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] #[reflect(Component)] #[reflect(from_reflect = false)] - struct B; + struct B(#[reflect(ignore)] PhantomData<()>); let mut world = World::default(); // No AppTypeRegistry - let e = world.spawn((A, B)).id(); + let e = world.spawn((A, B(Default::default()))).id(); let e_clone = world.spawn_empty().id(); EntityCloner::build(&mut world) .override_clone_behavior::(ComponentCloneBehavior::reflect()) @@ -1023,7 +1076,7 @@ mod tests { let registry = world.get_resource::().unwrap(); registry.write().register::(); - let e = world.spawn((A, B)).id(); + let e = world.spawn((A, B(Default::default()))).id(); let e_clone = world.spawn_empty().id(); EntityCloner::build(&mut world).clone_entity(e, e_clone); assert_eq!(world.get::(e_clone), None); @@ -1176,7 +1229,7 @@ mod tests { struct 
A; #[derive(Component, Clone, PartialEq, Debug, Default)] - #[require(C(|| C(5)))] + #[require(C(5))] struct B; #[derive(Component, Clone, PartialEq, Debug)] @@ -1189,9 +1242,7 @@ mod tests { EntityCloner::build(&mut world) .deny_all() - .without_required_components(|builder| { - builder.allow::(); - }) + .allow::() .clone_entity(e, e_clone); assert_eq!(world.entity(e_clone).get::(), None); @@ -1199,20 +1250,43 @@ mod tests { assert_eq!(world.entity(e_clone).get::(), Some(&C(5))); } + #[test] + fn clone_entity_with_default_required_components() { + #[derive(Component, Clone, PartialEq, Debug)] + #[require(B)] + struct A; + + #[derive(Component, Clone, PartialEq, Debug, Default)] + #[require(C(5))] + struct B; + + #[derive(Component, Clone, PartialEq, Debug)] + struct C(u32); + + let mut world = World::default(); + + let e = world.spawn((A, C(0))).id(); + let e_clone = world.spawn_empty().id(); + + EntityCloner::build(&mut world) + .deny_all() + .without_required_components(|builder| { + builder.allow::(); + }) + .clone_entity(e, e_clone); + + assert_eq!(world.entity(e_clone).get::(), Some(&A)); + assert_eq!(world.entity(e_clone).get::(), Some(&B)); + assert_eq!(world.entity(e_clone).get::(), Some(&C(5))); + } + #[test] fn clone_entity_with_dynamic_components() { const COMPONENT_SIZE: usize = 10; - fn test_handler(_commands: &mut Commands, ctx: &mut ComponentCloneCtx) { - // SAFETY: this handler is only going to be used with a component represented by [u8; COMPONENT_SIZE] + fn test_handler(source: &SourceComponent, ctx: &mut ComponentCloneCtx) { + // SAFETY: the passed in ptr corresponds to copy-able data that matches the type of the source / target component unsafe { - ctx.write_target_component_ptr(move |source_ptr, target_ptr| { - core::ptr::copy_nonoverlapping( - source_ptr.as_ptr(), - target_ptr.as_ptr(), - COMPONENT_SIZE, - ); - true - }); + ctx.write_target_component_ptr(source.ptr()); } } @@ -1269,7 +1343,7 @@ mod tests { let clone_root = world.spawn_empty().id(); EntityCloner::build(&mut world) - .recursive(true) + .linked_cloning(true) .clone_entity(root, clone_root); let root_children = world @@ -1297,7 +1371,11 @@ mod tests { fn clone_with_reflect_from_world() { #[derive(Component, Reflect, PartialEq, Eq, Debug)] #[reflect(Component, FromWorld, from_reflect = false)] - struct SomeRef(#[entities] Entity); + struct SomeRef( + #[entities] Entity, + // We add an ignored field here to ensure `reflect_clone` fails and `FromWorld` is used + #[reflect(ignore)] PhantomData<()>, + ); #[derive(Resource)] struct FromWorldCalled(bool); @@ -1305,7 +1383,7 @@ mod tests { impl FromWorld for SomeRef { fn from_world(world: &mut World) -> Self { world.insert_resource(FromWorldCalled(true)); - SomeRef(Entity::PLACEHOLDER) + SomeRef(Entity::PLACEHOLDER, Default::default()) } } let mut world = World::new(); @@ -1315,14 +1393,17 @@ mod tests { let a = world.spawn_empty().id(); let b = world.spawn_empty().id(); - let c = world.spawn(SomeRef(a)).id(); + let c = world.spawn(SomeRef(a, Default::default())).id(); let d = world.spawn_empty().id(); let mut map = EntityHashMap::::new(); map.insert(a, b); map.insert(c, d); let cloned = EntityCloner::default().clone_entity_mapped(&mut world, c, &mut map); - assert_eq!(*world.entity(cloned).get::().unwrap(), SomeRef(b)); + assert_eq!( + *world.entity(cloned).get::().unwrap(), + SomeRef(b, Default::default()) + ); assert!(world.resource::().0); } } diff --git a/crates/bevy_ecs/src/entity/entity_set.rs b/crates/bevy_ecs/src/entity/entity_set.rs index 
82ac75c163..e4860685fe 100644 --- a/crates/bevy_ecs/src/entity/entity_set.rs +++ b/crates/bevy_ecs/src/entity/entity_set.rs @@ -3,7 +3,7 @@ use alloc::{ collections::{btree_map, btree_set}, rc::Rc, }; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use core::{ array, @@ -13,40 +13,61 @@ use core::{ option, result, }; -use super::{Entity, UniqueEntitySlice}; +use super::{Entity, UniqueEntityEquivalentSlice}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; -/// A trait for entity borrows. +/// A trait for types that contain an [`Entity`]. /// -/// This trait can be thought of as `Borrow`, but yielding `Entity` directly. -pub trait EntityBorrow { - /// Returns the borrowed entity. +/// This trait behaves similarly to `Borrow`, but yielding `Entity` directly. +/// +/// It should only be implemented when: +/// - Retrieving the [`Entity`] is a simple operation. +/// - The [`Entity`] contained by the type is unambiguous. +pub trait ContainsEntity { + /// Returns the contained entity. fn entity(&self) -> Entity; } -/// A trait for [`Entity`] borrows with trustworthy comparison behavior. +/// A trait for types that represent an [`Entity`]. /// -/// Comparison trait behavior between a [`TrustedEntityBorrow`] type and its underlying entity will match. +/// Comparison trait behavior between an [`EntityEquivalent`] type and its underlying entity will match. /// This property includes [`PartialEq`], [`Eq`], [`PartialOrd`], [`Ord`] and [`Hash`], /// and remains even after [`Clone`] and/or [`Borrow`] calls. /// /// # Safety -/// Any [`PartialEq`], [`Eq`], [`PartialOrd`], [`Ord`], and [`Hash`] impls must be -/// equivalent for `Self` and its underlying entity: -/// `x.entity() == y.entity()` should give the same result as `x == y`. -/// The above equivalence must also hold through and between calls to any [`Clone`] -/// and [`Borrow`]/[`BorrowMut`] impls in place of [`entity()`]. +/// Any [`PartialEq`], [`Eq`], [`PartialOrd`], and [`Ord`] impls must evaluate the same for `Self` and +/// its underlying entity. +/// `x.entity() == y.entity()` must be equivalent to `x == y`. +/// +/// The above equivalence must also hold through and between calls to any [`Clone`] and +/// [`Borrow`]/[`BorrowMut`] impls in place of [`entity()`]. /// /// The result of [`entity()`] must be unaffected by any interior mutability. /// +/// The aforementioned properties imply determinism in both [`entity()`] calls +/// and comparison trait behavior. +/// +/// All [`Hash`] impls except that for [`Entity`] must delegate to the [`Hash`] impl of +/// another [`EntityEquivalent`] type. All conversions to the delegatee within the [`Hash`] impl must +/// follow [`entity()`] equivalence. +/// +/// It should be noted that [`Hash`] is *not* a comparison trait, and with [`Hash::hash`] being forcibly +/// generic over all [`Hasher`]s, **cannot** guarantee determinism or uniqueness of any final hash values +/// on its own. +/// To obtain hash values forming the same total order as [`Entity`], any [`Hasher`] used must be +/// deterministic and concerning [`Entity`], collisionless. +/// Standard library hash collections handle collisions with an [`Eq`] fallback, but do not account for +/// determinism when [`BuildHasher`] is unspecified,. 
+/// /// [`Hash`]: core::hash::Hash +/// [`Hasher`]: core::hash::Hasher /// [`Borrow`]: core::borrow::Borrow /// [`BorrowMut`]: core::borrow::BorrowMut -/// [`entity()`]: EntityBorrow::entity -pub unsafe trait TrustedEntityBorrow: EntityBorrow + Eq {} +/// [`entity()`]: ContainsEntity::entity +pub unsafe trait EntityEquivalent: ContainsEntity + Eq {} -impl EntityBorrow for Entity { +impl ContainsEntity for Entity { fn entity(&self) -> Entity { *self } @@ -54,9 +75,9 @@ impl EntityBorrow for Entity { // SAFETY: // The trait implementations of Entity are correct and deterministic. -unsafe impl TrustedEntityBorrow for Entity {} +unsafe impl EntityEquivalent for Entity {} -impl EntityBorrow for &T { +impl ContainsEntity for &T { fn entity(&self) -> Entity { (**self).entity() } @@ -66,9 +87,9 @@ impl EntityBorrow for &T { // `&T` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone` and `Borrow` maintain equality. // `&T` is `Freeze`. -unsafe impl TrustedEntityBorrow for &T {} +unsafe impl EntityEquivalent for &T {} -impl EntityBorrow for &mut T { +impl ContainsEntity for &mut T { fn entity(&self) -> Entity { (**self).entity() } @@ -78,9 +99,9 @@ impl EntityBorrow for &mut T { // `&mut T` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Borrow` and `BorrowMut` maintain equality. // `&mut T` is `Freeze`. -unsafe impl TrustedEntityBorrow for &mut T {} +unsafe impl EntityEquivalent for &mut T {} -impl EntityBorrow for Box { +impl ContainsEntity for Box { fn entity(&self) -> Entity { (**self).entity() } @@ -90,9 +111,9 @@ impl EntityBorrow for Box { // `Box` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone`, `Borrow` and `BorrowMut` maintain equality. // `Box` is `Freeze`. -unsafe impl TrustedEntityBorrow for Box {} +unsafe impl EntityEquivalent for Box {} -impl EntityBorrow for Rc { +impl ContainsEntity for Rc { fn entity(&self) -> Entity { (**self).entity() } @@ -102,9 +123,9 @@ impl EntityBorrow for Rc { // `Rc` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone`, `Borrow` and `BorrowMut` maintain equality. // `Rc` is `Freeze`. -unsafe impl TrustedEntityBorrow for Rc {} +unsafe impl EntityEquivalent for Rc {} -impl EntityBorrow for Arc { +impl ContainsEntity for Arc { fn entity(&self) -> Entity { (**self).entity() } @@ -114,7 +135,7 @@ impl EntityBorrow for Arc { // `Arc` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone`, `Borrow` and `BorrowMut` maintain equality. // `Arc` is `Freeze`. -unsafe impl TrustedEntityBorrow for Arc {} +unsafe impl EntityEquivalent for Arc {} /// A set of unique entities. /// @@ -146,7 +167,7 @@ impl> EntitySet for T {} /// /// `x != y` must hold for any 2 elements returned by the iterator. /// This is always true for iterators that cannot return more than one element. -pub unsafe trait EntitySetIterator: Iterator { +pub unsafe trait EntitySetIterator: Iterator { /// Transforms an `EntitySetIterator` into a collection. /// /// This is a specialized form of [`collect`], for collections which benefit from the uniqueness guarantee. @@ -164,89 +185,86 @@ pub unsafe trait EntitySetIterator: Iterator { // SAFETY: // A correct `BTreeMap` contains only unique keys. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. -unsafe impl EntitySetIterator for btree_map::Keys<'_, K, V> {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. 
+unsafe impl EntitySetIterator for btree_map::Keys<'_, K, V> {} // SAFETY: // A correct `BTreeMap` contains only unique keys. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. -unsafe impl EntitySetIterator for btree_map::IntoKeys {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. +unsafe impl EntitySetIterator for btree_map::IntoKeys {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The sub-range maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Range<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Range<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "intersection" operation maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Intersection<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Intersection<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "union" operation maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Union<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Union<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "difference" operation maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Difference<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Difference<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "symmetric difference" operation maintains uniqueness. -unsafe impl EntitySetIterator - for btree_set::SymmetricDifference<'_, T> -{ -} +unsafe impl EntitySetIterator for btree_set::SymmetricDifference<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. -unsafe impl EntitySetIterator for btree_set::Iter<'_, T> {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +unsafe impl EntitySetIterator for btree_set::Iter<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. -unsafe impl EntitySetIterator for btree_set::IntoIter {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +unsafe impl EntitySetIterator for btree_set::IntoIter {} // SAFETY: This iterator only returns one element. 
-unsafe impl EntitySetIterator for option::Iter<'_, T> {} +unsafe impl EntitySetIterator for option::Iter<'_, T> {} // SAFETY: This iterator only returns one element. -// unsafe impl EntitySetIterator for option::IterMut<'_, T> {} +// unsafe impl EntitySetIterator for option::IterMut<'_, T> {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for option::IntoIter {} +unsafe impl EntitySetIterator for option::IntoIter {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for result::Iter<'_, T> {} +unsafe impl EntitySetIterator for result::Iter<'_, T> {} // SAFETY: This iterator only returns one element. -// unsafe impl EntitySetIterator for result::IterMut<'_, T> {} +// unsafe impl EntitySetIterator for result::IterMut<'_, T> {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for result::IntoIter {} +unsafe impl EntitySetIterator for result::IntoIter {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for array::IntoIter {} +unsafe impl EntitySetIterator for array::IntoIter {} // SAFETY: This iterator does not return any elements. -unsafe impl EntitySetIterator for array::IntoIter {} +unsafe impl EntitySetIterator for array::IntoIter {} // SAFETY: This iterator only returns one element. -unsafe impl T> EntitySetIterator for iter::OnceWith {} +unsafe impl T> EntitySetIterator for iter::OnceWith {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for iter::Once {} +unsafe impl EntitySetIterator for iter::Once {} // SAFETY: This iterator does not return any elements. -unsafe impl EntitySetIterator for iter::Empty {} +unsafe impl EntitySetIterator for iter::Empty {} // SAFETY: Taking a mutable reference of an iterator has no effect on its elements. unsafe impl EntitySetIterator for &mut I {} @@ -254,14 +272,14 @@ unsafe impl EntitySetIterator for &mut I {} // SAFETY: Boxing an iterator has no effect on its elements. unsafe impl EntitySetIterator for Box {} -// SAFETY: TrustedEntityBorrow ensures that Copy does not affect equality, via its restrictions on Clone. -unsafe impl<'a, T: 'a + TrustedEntityBorrow + Copy, I: EntitySetIterator> +// SAFETY: EntityEquivalent ensures that Copy does not affect equality, via its restrictions on Clone. +unsafe impl<'a, T: 'a + EntityEquivalent + Copy, I: EntitySetIterator> EntitySetIterator for iter::Copied { } -// SAFETY: TrustedEntityBorrow ensures that Clone does not affect equality. -unsafe impl<'a, T: 'a + TrustedEntityBorrow + Clone, I: EntitySetIterator> +// SAFETY: EntityEquivalent ensures that Clone does not affect equality. +unsafe impl<'a, T: 'a + EntityEquivalent + Clone, I: EntitySetIterator> EntitySetIterator for iter::Cloned { } @@ -277,7 +295,7 @@ unsafe impl EntitySetIterator for iter::Fuse {} // SAFETY: // Obtaining immutable references the elements of an iterator does not affect uniqueness. -// TrustedEntityBorrow ensures the lack of interior mutability. +// EntityEquivalent ensures the lack of interior mutability. unsafe impl::Item)> EntitySetIterator for iter::Inspect { @@ -310,18 +328,18 @@ unsafe impl EntitySetIterator for iter::StepBy {} /// Conversion from an `EntitySetIterator`. /// /// Some collections, while they can be constructed from plain iterators, -/// benefit strongly from the additional uniqeness guarantee [`EntitySetIterator`] offers. +/// benefit strongly from the additional uniqueness guarantee [`EntitySetIterator`] offers. 
/// Mirroring [`Iterator::collect`]/[`FromIterator::from_iter`], [`EntitySetIterator::collect_set`] and /// `FromEntitySetIterator::from_entity_set_iter` can be used for construction. /// /// See also: [`EntitySet`]. // FIXME: When subtrait item shadowing stabilizes, this should be renamed and shadow `FromIterator::from_iter` -pub trait FromEntitySetIterator: FromIterator { +pub trait FromEntitySetIterator: FromIterator { /// Creates a value from an [`EntitySetIterator`]. fn from_entity_set_iter>(set_iter: T) -> Self; } -impl FromEntitySetIterator +impl FromEntitySetIterator for HashSet { fn from_entity_set_iter>(set_iter: I) -> Self { @@ -340,7 +358,7 @@ impl FromEntitySetItera /// An iterator that yields unique entities. /// /// This wrapper can provide an [`EntitySetIterator`] implementation when an instance of `I` is known to uphold uniqueness. -pub struct UniqueEntityIter> { +pub struct UniqueEntityIter> { iter: I, } @@ -351,7 +369,7 @@ impl UniqueEntityIter { } } -impl> UniqueEntityIter { +impl> UniqueEntityIter { /// Constructs a [`UniqueEntityIter`] from an iterator unsafely. /// /// # Safety @@ -382,7 +400,7 @@ impl> UniqueEntityIter { } } -impl> Iterator for UniqueEntityIter { +impl> Iterator for UniqueEntityIter { type Item = I::Item; fn next(&mut self) -> Option { @@ -394,42 +412,40 @@ impl> Iterator for UniqueEntityIter { } } -impl> ExactSizeIterator for UniqueEntityIter {} +impl> ExactSizeIterator for UniqueEntityIter {} -impl> DoubleEndedIterator - for UniqueEntityIter -{ +impl> DoubleEndedIterator for UniqueEntityIter { fn next_back(&mut self) -> Option { self.iter.next_back() } } -impl> FusedIterator for UniqueEntityIter {} +impl> FusedIterator for UniqueEntityIter {} // SAFETY: The underlying iterator is ensured to only return unique elements by its construction. -unsafe impl> EntitySetIterator for UniqueEntityIter {} +unsafe impl> EntitySetIterator for UniqueEntityIter {} -impl + AsRef<[T]>> AsRef<[T]> for UniqueEntityIter { +impl + AsRef<[T]>> AsRef<[T]> for UniqueEntityIter { fn as_ref(&self) -> &[T] { self.iter.as_ref() } } -impl + AsRef<[T]>> - AsRef> for UniqueEntityIter +impl + AsRef<[T]>> + AsRef> for UniqueEntityIter { - fn as_ref(&self) -> &UniqueEntitySlice { + fn as_ref(&self) -> &UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.iter.as_ref()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.iter.as_ref()) } } } -impl + AsMut<[T]>> - AsMut> for UniqueEntityIter +impl + AsMut<[T]>> + AsMut> for UniqueEntityIter { - fn as_mut(&mut self) -> &mut UniqueEntitySlice { + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. 
- unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.iter.as_mut()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.iter.as_mut()) } } } @@ -451,7 +467,7 @@ impl Clone for UniqueEntityIter { } } -impl + Debug> Debug for UniqueEntityIter { +impl + Debug> Debug for UniqueEntityIter { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { f.debug_struct("UniqueEntityIter") .field("iter", &self.iter) diff --git a/crates/bevy_ecs/src/entity/hash.rs b/crates/bevy_ecs/src/entity/hash.rs index bf957328ef..a538473439 100644 --- a/crates/bevy_ecs/src/entity/hash.rs +++ b/crates/bevy_ecs/src/entity/hash.rs @@ -1,11 +1,11 @@ use core::hash::{BuildHasher, Hasher}; #[cfg(feature = "bevy_reflect")] -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// A [`BuildHasher`] that results in a [`EntityHasher`]. #[derive(Debug, Default, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Default, Clone))] pub struct EntityHash; impl BuildHasher for EntityHash { @@ -25,7 +25,7 @@ impl BuildHasher for EntityHash { /// /// If you have an unusual case -- say all your indices are multiples of 256 /// or most of the entities are dead generations -- then you might want also to -/// try [`DefaultHasher`](bevy_platform_support::hash::DefaultHasher) for a slower hash +/// try [`DefaultHasher`](bevy_platform::hash::DefaultHasher) for a slower hash /// computation but fewer lookup conflicts. #[derive(Debug, Default)] pub struct EntityHasher { diff --git a/crates/bevy_ecs/src/entity/hash_map.rs b/crates/bevy_ecs/src/entity/hash_map.rs index 39fbfb1d87..d83ea7bae1 100644 --- a/crates/bevy_ecs/src/entity/hash_map.rs +++ b/crates/bevy_ecs/src/entity/hash_map.rs @@ -9,11 +9,11 @@ use core::{ ops::{Deref, DerefMut, Index}, }; -use bevy_platform_support::collections::hash_map::{self, HashMap}; +use bevy_platform::collections::hash_map::{self, HashMap}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -use super::{Entity, EntityHash, EntitySetIterator, TrustedEntityBorrow}; +use super::{Entity, EntityEquivalent, EntityHash, EntitySetIterator}; /// A [`HashMap`] pre-configured to use [`EntityHash`] hashing. #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] @@ -113,7 +113,7 @@ impl FromIterator<(Entity, V)> for EntityHashMap { } } -impl Index<&Q> for EntityHashMap { +impl Index<&Q> for EntityHashMap { type Output = V; fn index(&self, key: &Q) -> &V { self.0.index(&key.entity()) diff --git a/crates/bevy_ecs/src/entity/hash_set.rs b/crates/bevy_ecs/src/entity/hash_set.rs index 91c1000c91..7fd1ae9011 100644 --- a/crates/bevy_ecs/src/entity/hash_set.rs +++ b/crates/bevy_ecs/src/entity/hash_set.rs @@ -12,7 +12,7 @@ use core::{ }, }; -use bevy_platform_support::collections::hash_set::{self, HashSet}; +use bevy_platform::collections::hash_set::{self, HashSet}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; diff --git a/crates/bevy_ecs/src/entity/index_map.rs b/crates/bevy_ecs/src/entity/index_map.rs index faf81c11dc..6f6cd1bb47 100644 --- a/crates/bevy_ecs/src/entity/index_map.rs +++ b/crates/bevy_ecs/src/entity/index_map.rs @@ -1,16 +1,27 @@ +//! Contains the [`EntityIndexMap`] type, an [`IndexMap`] pre-configured to use [`EntityHash`] hashing. +//! +//! This module is a lightweight wrapper around `indexmap`'s [`IndexMap`] that is more performant for [`Entity`] keys. 
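// [Editor's note] Illustrative sketch only, not part of the patch: a rough usage example
// of `EntityIndexMap` and the slicing API added below. The surrounding setup (a `World`
// to spawn entities from, `Default` being available on `EntityIndexMap`) is assumed
// rather than taken from this diff.
fn entity_index_map_sketch(world: &mut bevy_ecs::world::World) {
    use bevy_ecs::entity::index_map::EntityIndexMap;

    // Insertion order is preserved, and lookups use the cheap `EntityHash`.
    let mut map: EntityIndexMap<&'static str> = EntityIndexMap::default();
    let a = world.spawn_empty().id();
    let b = world.spawn_empty().id();
    map.insert(a, "first");
    map.insert(b, "second");

    // `as_slice` and range indexing return the new `Slice` wrapper, which encodes the
    // "source map uses `EntityHash`" invariant in the type.
    let slice = map.as_slice();
    assert_eq!(slice.len(), 2);
    assert_eq!(map[..1].len(), 1);
}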
+ use core::{ + cmp::Ordering, fmt::{self, Debug, Formatter}, - hash::BuildHasher, + hash::{BuildHasher, Hash, Hasher}, iter::FusedIterator, marker::PhantomData, - ops::{Deref, DerefMut, Index, IndexMut, RangeBounds}, + ops::{ + Bound, Deref, DerefMut, Index, IndexMut, Range, RangeBounds, RangeFrom, RangeFull, + RangeInclusive, RangeTo, RangeToInclusive, + }, + ptr, }; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -use indexmap::map::{self, IndexMap}; +use indexmap::map::{self, IndexMap, IntoValues, ValuesMut}; -use super::{Entity, EntityHash, EntitySetIterator, TrustedEntityBorrow}; +use super::{Entity, EntityEquivalent, EntityHash, EntitySetIterator}; + +use bevy_platform::prelude::Box; /// A [`IndexMap`] pre-configured to use [`EntityHash`] hashing. #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] @@ -42,6 +53,48 @@ impl EntityIndexMap { self.0 } + /// Returns a slice of all the key-value pairs in the map. + /// + /// Equivalent to [`IndexMap::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice of all the key-value pairs in the map. + /// + /// Equivalent to [`IndexMap::as_mut_slice`]. + pub fn as_mut_slice(&mut self) -> &mut Slice { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Slice::from_slice_unchecked_mut(self.0.as_mut_slice()) } + } + + /// Converts into a boxed slice of all the key-value pairs in the map. + /// + /// Equivalent to [`IndexMap::into_boxed_slice`]. + pub fn into_boxed_slice(self) -> Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Slice::from_boxed_slice_unchecked(self.0.into_boxed_slice()) } + } + + /// Returns a slice of key-value pairs in the given range of indices. + /// + /// Equivalent to [`IndexMap::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Slice> { + self.0.get_range(range).map(|slice| + // SAFETY: EntityIndexSetSlice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_slice_unchecked(slice) }) + } + + /// Returns a mutable slice of key-value pairs in the given range of indices. + /// + /// Equivalent to [`IndexMap::get_range_mut`]. + pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Slice> { + self.0.get_range_mut(range).map(|slice| + // SAFETY: EntityIndexSetSlice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_slice_unchecked_mut(slice) }) + } + /// Return an iterator over the key-value pairs of the map, in their order. /// /// Equivalent to [`IndexMap::iter`]. @@ -123,13 +176,69 @@ impl FromIterator<(Entity, V)> for EntityIndexMap { } } -impl Index<&Q> for EntityIndexMap { +impl Index<&Q> for EntityIndexMap { type Output = V; fn index(&self, key: &Q) -> &V { self.0.index(&key.entity()) } } +impl Index<(Bound, Bound)> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: (Bound, Bound)) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: Range) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeFrom) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. 
+ unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeFull) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeInclusive) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeTo) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeToInclusive) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + impl Index for EntityIndexMap { type Output = V; fn index(&self, key: usize) -> &V { @@ -137,12 +246,61 @@ impl Index for EntityIndexMap { } } -impl IndexMut<&Q> for EntityIndexMap { +impl IndexMut<&Q> for EntityIndexMap { fn index_mut(&mut self, key: &Q) -> &mut V { self.0.index_mut(&key.entity()) } } +impl IndexMut<(Bound, Bound)> for EntityIndexMap { + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: Range) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeFrom) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut for EntityIndexMap { + fn index_mut(&mut self, key: RangeFull) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeTo) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + impl IndexMut for EntityIndexMap { fn index_mut(&mut self, key: usize) -> &mut V { self.0.index_mut(key) @@ -197,6 +355,476 @@ where impl Eq for EntityIndexMap {} +/// A dynamically-sized slice of key-value pairs in an [`EntityIndexMap`]. +/// +/// Equivalent to an [`indexmap::map::Slice`] whose source [`IndexMap`] +/// uses [`EntityHash`]. +#[repr(transparent)] +pub struct Slice(PhantomData, map::Slice); + +impl Slice { + /// Returns an empty slice. + /// + /// Equivalent to [`map::Slice::new`]. + pub const fn new<'a>() -> &'a Self { + // SAFETY: The source slice is empty. 
+ unsafe { Self::from_slice_unchecked(map::Slice::new()) } + } + + /// Returns an empty mutable slice. + /// + /// Equivalent to [`map::Slice::new_mut`]. + pub fn new_mut<'a>() -> &'a mut Self { + // SAFETY: The source slice is empty. + unsafe { Self::from_slice_unchecked_mut(map::Slice::new_mut()) } + } + + /// Constructs a [`entity::index_map::Slice`] from a [`indexmap::map::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexMap`] using [`EntityHash`]. + /// + /// [`entity::index_map::Slice`]: `crate::entity::index_map::Slice` + pub const unsafe fn from_slice_unchecked(slice: &map::Slice) -> &Self { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { &*(ptr::from_ref(slice) as *const Self) } + } + + /// Constructs a [`entity::index_map::Slice`] from a [`indexmap::map::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexMap`] using [`EntityHash`]. + /// + /// [`entity::index_map::Slice`]: `crate::entity::index_map::Slice` + pub const unsafe fn from_slice_unchecked_mut(slice: &mut map::Slice) -> &mut Self { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { &mut *(ptr::from_mut(slice) as *mut Self) } + } + + /// Casts `self` to the inner slice. + pub const fn as_inner(&self) -> &map::Slice { + &self.1 + } + + /// Constructs a boxed [`entity::index_map::Slice`] from a boxed [`indexmap::map::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexMap`] using [`EntityHash`]. + /// + /// [`entity::index_map::Slice`]: `crate::entity::index_map::Slice` + pub unsafe fn from_boxed_slice_unchecked(slice: Box>) -> Box { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Box::from_raw(Box::into_raw(slice) as *mut Self) } + } + + /// Casts a reference to `self` to the inner slice. + #[expect( + clippy::borrowed_box, + reason = "We wish to access the Box API of the inner type, without consuming it." + )] + pub fn as_boxed_inner(self: &Box) -> &Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { &*(ptr::from_ref(self).cast::>>()) } + } + + /// Casts `self` to the inner slice. + pub fn into_boxed_inner(self: Box) -> Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Box::from_raw(Box::into_raw(self) as *mut map::Slice) } + } + + /// Get a key-value pair by index, with mutable access to the value. + /// + /// Equivalent to [`map::Slice::get_index_mut`]. + pub fn get_index_mut(&mut self, index: usize) -> Option<(&Entity, &mut V)> { + self.1.get_index_mut(index) + } + + /// Returns a slice of key-value pairs in the given range of indices. + /// + /// Equivalent to [`map::Slice::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Self> { + self.1.get_range(range).map(|slice| + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(slice) }) + } + + /// Returns a mutable slice of key-value pairs in the given range of indices. + /// + /// Equivalent to [`map::Slice::get_range_mut`]. + pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Self> { + self.1.get_range_mut(range).map(|slice| + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(slice) }) + } + + /// Get the first key-value pair, with mutable access to the value. + /// + /// Equivalent to [`map::Slice::first_mut`]. 
+ pub fn first_mut(&mut self) -> Option<(&Entity, &mut V)> { + self.1.first_mut() + } + + /// Get the last key-value pair, with mutable access to the value. + /// + /// Equivalent to [`map::Slice::last_mut`]. + pub fn last_mut(&mut self) -> Option<(&Entity, &mut V)> { + self.1.last_mut() + } + + /// Divides one slice into two at an index. + /// + /// Equivalent to [`map::Slice::split_at`]. + pub fn split_at(&self, index: usize) -> (&Self, &Self) { + let (slice_1, slice_2) = self.1.split_at(index); + // SAFETY: These are subslices of a valid slice. + unsafe { + ( + Self::from_slice_unchecked(slice_1), + Self::from_slice_unchecked(slice_2), + ) + } + } + + /// Divides one mutable slice into two at an index. + /// + /// Equivalent to [`map::Slice::split_at_mut`]. + pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) { + let (slice_1, slice_2) = self.1.split_at_mut(index); + // SAFETY: These are subslices of a valid slice. + unsafe { + ( + Self::from_slice_unchecked_mut(slice_1), + Self::from_slice_unchecked_mut(slice_2), + ) + } + } + + /// Returns the first key-value pair and the rest of the slice, + /// or `None` if it is empty. + /// + /// Equivalent to [`map::Slice::split_first`]. + pub fn split_first(&self) -> Option<((&Entity, &V), &Self)> { + self.1.split_first().map(|(first, rest)| { + ( + first, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Returns the first key-value pair and the rest of the slice, + /// with mutable access to the value, or `None` if it is empty. + /// + /// Equivalent to [`map::Slice::split_first_mut`]. + pub fn split_first_mut(&mut self) -> Option<((&Entity, &mut V), &mut Self)> { + self.1.split_first_mut().map(|(first, rest)| { + ( + first, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(rest) }, + ) + }) + } + + /// Returns the last key-value pair and the rest of the slice, + /// or `None` if it is empty. + /// + /// Equivalent to [`map::Slice::split_last`]. + pub fn split_last(&self) -> Option<((&Entity, &V), &Self)> { + self.1.split_last().map(|(last, rest)| { + ( + last, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Returns the last key-value pair and the rest of the slice, + /// with mutable access to the value, or `None` if it is empty. + /// + /// Equivalent to [`map::Slice::split_last_mut`]. + pub fn split_last_mut(&mut self) -> Option<((&Entity, &mut V), &mut Self)> { + self.1.split_last_mut().map(|(last, rest)| { + ( + last, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(rest) }, + ) + }) + } + + /// Return an iterator over the key-value pairs of the map slice. + /// + /// Equivalent to [`map::Slice::iter`]. + pub fn iter(&self) -> Iter<'_, V> { + Iter(self.1.iter(), PhantomData) + } + + /// Return an iterator over the key-value pairs of the map slice. + /// + /// Equivalent to [`map::Slice::iter_mut`]. + pub fn iter_mut(&mut self) -> IterMut<'_, V> { + IterMut(self.1.iter_mut(), PhantomData) + } + + /// Return an iterator over the keys of the map slice. + /// + /// Equivalent to [`map::Slice::keys`]. + pub fn keys(&self) -> Keys<'_, V> { + Keys(self.1.keys(), PhantomData) + } + + /// Return an owning iterator over the keys of the map slice. + /// + /// Equivalent to [`map::Slice::into_keys`]. 
+ pub fn into_keys(self: Box) -> IntoKeys { + IntoKeys(self.into_boxed_inner().into_keys(), PhantomData) + } + + /// Return an iterator over mutable references to the the values of the map slice. + /// + /// Equivalent to [`map::Slice::values_mut`]. + pub fn values_mut(&mut self) -> ValuesMut<'_, Entity, V> { + self.1.values_mut() + } + + /// Return an owning iterator over the values of the map slice. + /// + /// Equivalent to [`map::Slice::into_values`]. + pub fn into_values(self: Box) -> IntoValues { + self.into_boxed_inner().into_values() + } +} + +impl Deref for Slice { + type Target = map::Slice; + + fn deref(&self) -> &Self::Target { + &self.1 + } +} + +impl Debug for Slice { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Slice") + .field(&self.0) + .field(&&self.1) + .finish() + } +} + +impl Clone for Box> { + fn clone(&self) -> Self { + // SAFETY: This a clone of a valid slice. + unsafe { Slice::from_boxed_slice_unchecked(self.as_boxed_inner().clone()) } + } +} + +impl Default for &Slice { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_slice_unchecked(<&map::Slice>::default()) } + } +} + +impl Default for &mut Slice { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_slice_unchecked_mut(<&mut map::Slice>::default()) } + } +} + +impl Default for Box> { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_boxed_slice_unchecked(>>::default()) } + } +} + +impl From<&Slice> for Box> { + fn from(value: &Slice) -> Self { + // SAFETY: This slice is a copy of a valid slice. + unsafe { Slice::from_boxed_slice_unchecked(value.1.into()) } + } +} + +impl Hash for Slice { + fn hash(&self, state: &mut H) { + self.1.hash(state); + } +} + +impl<'a, V> IntoIterator for &'a Slice { + type Item = (&'a Entity, &'a V); + type IntoIter = Iter<'a, V>; + + fn into_iter(self) -> Self::IntoIter { + Iter(self.1.iter(), PhantomData) + } +} + +impl<'a, V> IntoIterator for &'a mut Slice { + type Item = (&'a Entity, &'a mut V); + type IntoIter = IterMut<'a, V>; + + fn into_iter(self) -> Self::IntoIter { + IterMut(self.1.iter_mut(), PhantomData) + } +} + +impl IntoIterator for Box> { + type Item = (Entity, V); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter(self.into_boxed_inner().into_iter(), PhantomData) + } +} + +impl PartialOrd for Slice { + fn partial_cmp(&self, other: &Self) -> Option { + self.1.partial_cmp(&other.1) + } +} + +impl Ord for Slice { + fn cmp(&self, other: &Self) -> Ordering { + self.1.cmp(other) + } +} + +impl PartialEq for Slice { + fn eq(&self, other: &Self) -> bool { + self.1 == other.1 + } +} + +impl Eq for Slice {} + +impl Index<(Bound, Bound)> for Slice { + type Output = Self; + fn index(&self, key: (Bound, Bound)) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: Range) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeFrom) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = Self; + fn index(&self, key: RangeFull) -> &Self { + // SAFETY: This a subslice of a valid slice. 
+ unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeTo) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeToInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = V; + fn index(&self, key: usize) -> &V { + self.1.index(key) + } +} + +impl IndexMut<(Bound, Bound)> for Slice { + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: Range) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeFrom) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut for Slice { + fn index_mut(&mut self, key: RangeFull) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeTo) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut for Slice { + fn index_mut(&mut self, key: usize) -> &mut V { + self.1.index_mut(key) + } +} + /// An iterator over the entries of an [`EntityIndexMap`]. /// /// This `struct` is created by the [`EntityIndexMap::iter`] method. @@ -208,6 +836,14 @@ impl<'a, V> Iter<'a, V> { pub fn into_inner(self) -> map::Iter<'a, Entity, V> { self.0 } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::Iter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } } impl<'a, V> Deref for Iter<'a, V> { @@ -265,6 +901,22 @@ impl<'a, V> IterMut<'a, V> { pub fn into_inner(self) -> map::IterMut<'a, Entity, V> { self.0 } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IterMut::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IterMut::into_slice`]. 
+ pub fn into_slice(self) -> &'a mut Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.into_slice()) } + } } impl<'a, V> Deref for IterMut<'a, V> { @@ -319,6 +971,22 @@ impl IntoIter { pub fn into_inner(self) -> map::IntoIter { self.0 } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IntoIter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IntoIter::as_mut_slice`]. + pub fn as_mut_slice(&mut self) -> &mut Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.as_mut_slice()) } + } } impl Deref for IntoIter { @@ -379,6 +1047,14 @@ impl<'a, V> Drain<'a, V> { pub fn into_inner(self) -> map::Drain<'a, Entity, V> { self.0 } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::Drain::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } } impl<'a, V> Deref for Drain<'a, V> { diff --git a/crates/bevy_ecs/src/entity/index_set.rs b/crates/bevy_ecs/src/entity/index_set.rs index 7a7cecda82..42f420a211 100644 --- a/crates/bevy_ecs/src/entity/index_set.rs +++ b/crates/bevy_ecs/src/entity/index_set.rs @@ -1,15 +1,27 @@ +//! Contains the [`EntityIndexSet`] type, a [`IndexSet`] pre-configured to use [`EntityHash`] hashing. +//! +//! This module is a lightweight wrapper around `indexmap`'ss [`IndexSet`] that is more performant for [`Entity`] keys. + use core::{ + cmp::Ordering, fmt::{self, Debug, Formatter}, hash::BuildHasher, + hash::{Hash, Hasher}, iter::FusedIterator, marker::PhantomData, - ops::{BitAnd, BitOr, BitXor, Deref, DerefMut, Index, RangeBounds, Sub}, + ops::{ + BitAnd, BitOr, BitXor, Bound, Deref, DerefMut, Index, Range, RangeBounds, RangeFrom, + RangeFull, RangeInclusive, RangeTo, RangeToInclusive, Sub, + }, + ptr, }; use indexmap::set::{self, IndexSet}; use super::{Entity, EntityHash, EntitySetIterator}; +use bevy_platform::prelude::Box; + /// An [`IndexSet`] pre-configured to use [`EntityHash`] hashing. #[cfg_attr(feature = "serialize", derive(serde::Deserialize, serde::Serialize))] #[derive(Debug, Clone, Default)] @@ -39,6 +51,14 @@ impl EntityIndexSet { self.0 } + /// Returns a slice of all the values in the set. + /// + /// Equivalent to [`IndexSet::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + /// Clears the `IndexSet` in the given index range, returning those values /// as a drain iterator. /// @@ -47,12 +67,29 @@ impl EntityIndexSet { Drain(self.0.drain(range), PhantomData) } + /// Returns a slice of values in the given range of indices. + /// + /// Equivalent to [`IndexSet::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Slice> { + self.0.get_range(range).map(|slice| + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(slice) }) + } + /// Return an iterator over the values of the set, in their order. /// /// Equivalent to [`IndexSet::iter`]. 
pub fn iter(&self) -> Iter<'_> { Iter(self.0.iter(), PhantomData) } + + /// Converts into a boxed slice of all the values in the set. + /// + /// Equivalent to [`IndexSet::into_boxed_slice`]. + pub fn into_boxed_slice(self) -> Box { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_boxed_slice_unchecked(self.0.into_boxed_slice()) } + } } impl Deref for EntityIndexSet { @@ -162,6 +199,62 @@ impl PartialEq for EntityIndexSet { impl Eq for EntityIndexSet {} +impl Index<(Bound, Bound)> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: (Bound, Bound)) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: Range) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeFrom) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeFull) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeInclusive) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeTo) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeToInclusive) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + impl Index for EntityIndexSet { type Output = Entity; fn index(&self, key: usize) -> &Entity { @@ -169,6 +262,290 @@ impl Index for EntityIndexSet { } } +/// A dynamically-sized slice of values in an [`EntityIndexSet`]. +/// +/// Equivalent to an [`indexmap::set::Slice`] whose source [`IndexSet`] +/// uses [`EntityHash`]. +#[repr(transparent)] +pub struct Slice(PhantomData, set::Slice); + +impl Slice { + /// Returns an empty slice. + /// + /// Equivalent to [`set::Slice::new`]. + pub const fn new<'a>() -> &'a Self { + // SAFETY: The source slice is empty. + unsafe { Self::from_slice_unchecked(set::Slice::new()) } + } + + /// Constructs a [`entity::index_set::Slice`] from a [`indexmap::set::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexSet`] using [`EntityHash`]. + /// + /// [`entity::index_set::Slice`]: `crate::entity::index_set::Slice` + pub const unsafe fn from_slice_unchecked(slice: &set::Slice) -> &Self { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { &*(ptr::from_ref(slice) as *const Self) } + } + + /// Constructs a [`entity::index_set::Slice`] from a [`indexmap::set::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexSet`] using [`EntityHash`]. 
+ /// + /// [`entity::index_set::Slice`]: `crate::entity::index_set::Slice` + pub const unsafe fn from_slice_unchecked_mut(slice: &mut set::Slice) -> &mut Self { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { &mut *(ptr::from_mut(slice) as *mut Self) } + } + + /// Casts `self` to the inner slice. + pub const fn as_inner(&self) -> &set::Slice { + &self.1 + } + + /// Constructs a boxed [`entity::index_set::Slice`] from a boxed [`indexmap::set::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexSet`] using [`EntityHash`]. + /// + /// [`entity::index_set::Slice`]: `crate::entity::index_set::Slice` + pub unsafe fn from_boxed_slice_unchecked(slice: Box>) -> Box { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Box::from_raw(Box::into_raw(slice) as *mut Self) } + } + + /// Casts a reference to `self` to the inner slice. + #[expect( + clippy::borrowed_box, + reason = "We wish to access the Box API of the inner type, without consuming it." + )] + pub fn as_boxed_inner(self: &Box) -> &Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { &*(ptr::from_ref(self).cast::>>()) } + } + + /// Casts `self` to the inner slice. + pub fn into_boxed_inner(self: Box) -> Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Box::from_raw(Box::into_raw(self) as *mut set::Slice) } + } + + /// Returns a slice of values in the given range of indices. + /// + /// Equivalent to [`set::Slice::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Self> { + self.1.get_range(range).map(|slice| + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(slice) }) + } + + /// Divides one slice into two at an index. + /// + /// Equivalent to [`set::Slice::split_at`]. + pub fn split_at(&self, index: usize) -> (&Self, &Self) { + let (slice_1, slice_2) = self.1.split_at(index); + // SAFETY: These are subslices of a valid slice. + unsafe { + ( + Self::from_slice_unchecked(slice_1), + Self::from_slice_unchecked(slice_2), + ) + } + } + + /// Returns the first value and the rest of the slice, + /// or `None` if it is empty. + /// + /// Equivalent to [`set::Slice::split_first`]. + pub fn split_first(&self) -> Option<(&Entity, &Self)> { + self.1.split_first().map(|(first, rest)| { + ( + first, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Returns the last value and the rest of the slice, + /// or `None` if it is empty. + /// + /// Equivalent to [`set::Slice::split_last`]. + pub fn split_last(&self) -> Option<(&Entity, &Self)> { + self.1.split_last().map(|(last, rest)| { + ( + last, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Return an iterator over the values of the set slice. + /// + /// Equivalent to [`set::Slice::iter`]. 
+ pub fn iter(&self) -> Iter<'_> { + Iter(self.1.iter(), PhantomData) + } +} + +impl Deref for Slice { + type Target = set::Slice; + + fn deref(&self) -> &Self::Target { + &self.1 + } +} + +impl<'a> IntoIterator for &'a Slice { + type IntoIter = Iter<'a>; + type Item = &'a Entity; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for Box { + type IntoIter = IntoIter; + type Item = Entity; + + fn into_iter(self) -> Self::IntoIter { + IntoIter(self.into_boxed_inner().into_iter(), PhantomData) + } +} + +impl Clone for Box { + fn clone(&self) -> Self { + // SAFETY: This is a clone of a valid slice. + unsafe { Slice::from_boxed_slice_unchecked(self.as_boxed_inner().clone()) } + } +} + +impl Default for &Slice { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_slice_unchecked(<&set::Slice>::default()) } + } +} + +impl Default for Box { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_boxed_slice_unchecked(>>::default()) } + } +} + +impl Debug for Slice { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Slice") + .field(&self.0) + .field(&&self.1) + .finish() + } +} + +impl From<&Slice> for Box { + fn from(value: &Slice) -> Self { + // SAFETY: This slice is a copy of a valid slice. + unsafe { Slice::from_boxed_slice_unchecked(value.1.into()) } + } +} + +impl Hash for Slice { + fn hash(&self, state: &mut H) { + self.1.hash(state); + } +} + +impl PartialOrd for Slice { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Slice { + fn cmp(&self, other: &Self) -> Ordering { + self.1.cmp(other) + } +} + +impl PartialEq for Slice { + fn eq(&self, other: &Self) -> bool { + self.1 == other.1 + } +} + +impl Eq for Slice {} + +impl Index<(Bound, Bound)> for Slice { + type Output = Self; + fn index(&self, key: (Bound, Bound)) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: Range) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Slice; + fn index(&self, key: RangeFrom) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = Self; + fn index(&self, key: RangeFull) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeTo) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeToInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = Entity; + fn index(&self, key: usize) -> &Entity { + self.1.index(key) + } +} + /// An iterator over the items of an [`EntityIndexSet`]. /// /// This struct is created by the [`iter`] method on [`EntityIndexSet`]. 
See its documentation for more. @@ -181,6 +558,14 @@ impl<'a> Iter<'a> { pub fn into_inner(self) -> set::Iter<'a, Entity> { self.0 } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`set::Iter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } } impl<'a> Deref for Iter<'a> { @@ -242,6 +627,14 @@ impl IntoIter { pub fn into_inner(self) -> set::IntoIter { self.0 } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`set::IntoIter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } } impl Deref for IntoIter { @@ -306,6 +699,14 @@ impl<'a> Drain<'a> { pub fn into_inner(self) -> set::Drain<'a, Entity> { self.0 } + + /// Returns a slice of the remaining entries in the iterator.$ + /// + /// Equivalent to [`set::Drain::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } } impl<'a> Deref for Drain<'a> { diff --git a/crates/bevy_ecs/src/entity/map_entities.rs b/crates/bevy_ecs/src/entity/map_entities.rs index caa02eaeac..c79853f979 100644 --- a/crates/bevy_ecs/src/entity/map_entities.rs +++ b/crates/bevy_ecs/src/entity/map_entities.rs @@ -1,10 +1,15 @@ +pub use bevy_ecs_macros::MapEntities; + use crate::{ - entity::Entity, + entity::{hash_map::EntityHashMap, Entity}, identifier::masks::{IdentifierMask, HIGH_MASK}, world::World, }; -use super::{hash_map::EntityHashMap, VisitEntitiesMut}; +use alloc::{collections::VecDeque, vec::Vec}; +use bevy_platform::collections::HashSet; +use core::hash::BuildHasher; +use smallvec::SmallVec; /// Operation to map all contained [`Entity`] fields in a type to new values. /// @@ -15,15 +20,11 @@ use super::{hash_map::EntityHashMap, VisitEntitiesMut}; /// (usually by using an [`EntityHashMap`] between source entities and entities in the /// current world). /// -/// This trait is similar to [`VisitEntitiesMut`]. They differ in that [`VisitEntitiesMut`] operates -/// on `&mut Entity` and allows for in-place modification, while this trait makes no assumption that -/// such in-place modification is occurring, which is impossible for types such as [`HashSet`] -/// and [`EntityHashMap`] which must be rebuilt when their contained [`Entity`]s are remapped. +/// Components use [`Component::map_entities`](crate::component::Component::map_entities) to map +/// entities in the context of scenes and entity cloning, which generally uses [`MapEntities`] internally +/// to map each field (see those docs for usage). /// -/// Implementing this trait correctly is required for properly loading components -/// with entity references from scenes. -/// -/// [`HashSet`]: bevy_platform_support::collections::HashSet +/// [`HashSet`]: bevy_platform::collections::HashSet /// /// ## Example /// @@ -49,17 +50,51 @@ pub trait MapEntities { /// /// Implementors should look up any and all [`Entity`] values stored within `self` and /// update them to the mapped values via `entity_mapper`. 
- fn map_entities(&mut self, entity_mapper: &mut M); + fn map_entities(&mut self, entity_mapper: &mut E); } -impl MapEntities for T { - fn map_entities(&mut self, entity_mapper: &mut M) { - self.visit_entities_mut(|entity| { - *entity = entity_mapper.get_mapped(*entity); - }); +impl MapEntities for Entity { + fn map_entities(&mut self, entity_mapper: &mut E) { + *self = entity_mapper.get_mapped(*self); } } +impl MapEntities for Option { + fn map_entities(&mut self, entity_mapper: &mut E) { + if let Some(entity) = self { + *entity = entity_mapper.get_mapped(*entity); + } + } +} + +impl MapEntities for HashSet { + fn map_entities(&mut self, entity_mapper: &mut E) { + *self = self.drain().map(|e| entity_mapper.get_mapped(e)).collect(); + } +} +impl MapEntities for Vec { + fn map_entities(&mut self, entity_mapper: &mut E) { + for entity in self.iter_mut() { + *entity = entity_mapper.get_mapped(*entity); + } + } +} + +impl MapEntities for VecDeque { + fn map_entities(&mut self, entity_mapper: &mut E) { + for entity in self.iter_mut() { + *entity = entity_mapper.get_mapped(*entity); + } + } +} + +impl> MapEntities for SmallVec { + fn map_entities(&mut self, entity_mapper: &mut E) { + for entity in self.iter_mut() { + *entity = entity_mapper.get_mapped(*entity); + } + } +} /// An implementor of this trait knows how to map an [`Entity`] into another [`Entity`]. /// /// Usually this is done by using an [`EntityHashMap`] to map source entities @@ -67,14 +102,13 @@ impl MapEntities for T { /// /// More generally, this can be used to map [`Entity`] references between any two [`Worlds`](World). /// -/// This can be used in tandem with [`Component::visit_entities`](crate::component::Component::visit_entities) -/// and [`Component::visit_entities_mut`](crate::component::Component::visit_entities_mut) to map a component's entities. +/// This is used by [`MapEntities`] implementors. 
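// [Editor's note] Illustrative sketch only, not part of the patch: with the blanket impl
// over `VisitEntitiesMut` removed, a type holding entity references now implements
// `MapEntities` itself (or uses the `MapEntities` derive re-exported at the top of this
// file). `Spring` and its fields are hypothetical; the `Entity`/`Option<Entity>` impls
// added above do the per-field remapping.
use bevy_ecs::entity::{Entity, EntityMapper, MapEntities};

struct Spring {
    anchor: Entity,
    attached_to: Option<Entity>,
}

impl MapEntities for Spring {
    fn map_entities<E: EntityMapper>(&mut self, entity_mapper: &mut E) {
        // Remap the plain field directly, then let the `Option<Entity>` impl handle the rest.
        self.anchor = entity_mapper.get_mapped(self.anchor);
        self.attached_to.map_entities(entity_mapper);
    }
}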
/// /// ## Example /// /// ``` /// # use bevy_ecs::entity::{Entity, EntityMapper}; -/// # use bevy_ecs::entity::hash_map::EntityHashMap; +/// # use bevy_ecs::entity::EntityHashMap; /// # /// pub struct SimpleEntityMapper { /// map: EntityHashMap, @@ -247,7 +281,7 @@ impl<'m> SceneEntityMapper<'m> { #[cfg(test)] mod tests { use crate::{ - entity::{hash_map::EntityHashMap, Entity, EntityMapper, SceneEntityMapper}, + entity::{Entity, EntityHashMap, EntityMapper, SceneEntityMapper}, world::World, }; diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 3f3ef9370b..7bba07aac6 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -39,7 +39,6 @@ mod clone_entities; mod entity_set; mod map_entities; -mod visit_entities; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; #[cfg(all(feature = "bevy_reflect", feature = "serialize"))] @@ -48,11 +47,6 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; pub use clone_entities::*; pub use entity_set::*; pub use map_entities::*; -pub use visit_entities::*; - -mod unique_vec; - -pub use unique_vec::*; mod hash; pub use hash::*; @@ -60,18 +54,26 @@ pub use hash::*; pub mod hash_map; pub mod hash_set; -mod index_map; -mod index_set; +pub use hash_map::EntityHashMap; +pub use hash_set::EntityHashSet; + +pub mod index_map; +pub mod index_set; pub use index_map::EntityIndexMap; pub use index_set::EntityIndexSet; -mod unique_slice; +pub mod unique_array; +pub mod unique_slice; +pub mod unique_vec; -pub use unique_slice::*; +pub use unique_array::{UniqueEntityArray, UniqueEntityEquivalentArray}; +pub use unique_slice::{UniqueEntityEquivalentSlice, UniqueEntitySlice}; +pub use unique_vec::{UniqueEntityEquivalentVec, UniqueEntityVec}; use crate::{ archetype::{ArchetypeId, ArchetypeRow}, + change_detection::MaybeLocation, identifier::{ error::IdentifierError, kinds::IdKind, @@ -81,18 +83,15 @@ use crate::{ storage::{SparseSetIndex, TableId, TableRow}, }; use alloc::vec::Vec; -use bevy_platform_support::sync::atomic::Ordering; -use core::{fmt, hash::Hash, mem, num::NonZero}; +use bevy_platform::sync::atomic::Ordering; +use core::{fmt, hash::Hash, mem, num::NonZero, panic::Location}; use log::warn; -#[cfg(feature = "track_location")] -use core::panic::Location; - #[cfg(feature = "serialize")] use serde::{Deserialize, Serialize}; #[cfg(target_has_atomic = "64")] -use bevy_platform_support::sync::atomic::AtomicI64 as AtomicIdCursor; +use bevy_platform::sync::atomic::AtomicI64 as AtomicIdCursor; #[cfg(target_has_atomic = "64")] type IdCursor = i64; @@ -100,7 +99,7 @@ type IdCursor = i64; /// do not. This fallback allows compilation using a 32-bit cursor instead, with /// the caveat that some conversions may fail (and panic) at runtime. 
#[cfg(not(target_has_atomic = "64"))] -use bevy_platform_support::sync::atomic::AtomicIsize as AtomicIdCursor; +use bevy_platform::sync::atomic::AtomicIsize as AtomicIdCursor; #[cfg(not(target_has_atomic = "64"))] type IdCursor = isize; @@ -175,7 +174,7 @@ type IdCursor = isize; #[derive(Clone, Copy)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", reflect(opaque))] -#[cfg_attr(feature = "bevy_reflect", reflect(Hash, PartialEq, Debug))] +#[cfg_attr(feature = "bevy_reflect", reflect(Hash, PartialEq, Debug, Clone))] #[cfg_attr( all(feature = "bevy_reflect", feature = "serialize"), reflect(Serialize, Deserialize) @@ -244,6 +243,10 @@ impl Hash for Entity { } } +#[deprecated( + since = "0.16.0", + note = "This is exclusively used with the now deprecated `Entities::alloc_at_without_replacement`." +)] pub(crate) enum AllocAtWithoutReplacement { Exists(EntityLocation), DidNotExist, @@ -577,8 +580,6 @@ pub struct Entities { /// [`flush`]: Entities::flush pending: Vec, free_cursor: AtomicIdCursor, - /// Stores the number of free entities for [`len`](Entities::len) - len: u32, } impl Entities { @@ -587,7 +588,6 @@ impl Entities { meta: Vec::new(), pending: Vec::new(), free_cursor: AtomicIdCursor::new(0), - len: 0, } } @@ -679,7 +679,6 @@ impl Entities { /// Allocate an entity ID directly. pub fn alloc(&mut self) -> Entity { self.verify_flushed(); - self.len += 1; if let Some(index) = self.pending.pop() { let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; @@ -695,6 +694,10 @@ impl Entities { /// /// Returns the location of the entity currently using the given ID, if any. Location should be /// written immediately. + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used after freeing a large number of entities and requesting an arbitrary entity. See #18054 on GitHub." + )] pub fn alloc_at(&mut self, entity: Entity) -> Option { self.verify_flushed(); @@ -705,13 +708,11 @@ impl Entities { *self.free_cursor.get_mut() = new_free_cursor; self.meta .resize(entity.index() as usize + 1, EntityMeta::EMPTY); - self.len += 1; None } else if let Some(index) = self.pending.iter().position(|item| *item == entity.index()) { self.pending.swap_remove(index); let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; - self.len += 1; None } else { Some(mem::replace( @@ -728,6 +729,14 @@ impl Entities { /// Allocate a specific entity ID, overwriting its generation. /// /// Returns the location of the entity currently using the given ID, if any. + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used after freeing a large number of entities and requesting an arbitrary entity. See #18054 on GitHub." + )] + #[expect( + deprecated, + reason = "We need to support `AllocAtWithoutReplacement` for now." 
+ )] pub(crate) fn alloc_at_without_replacement( &mut self, entity: Entity, @@ -741,13 +750,11 @@ impl Entities { *self.free_cursor.get_mut() = new_free_cursor; self.meta .resize(entity.index() as usize + 1, EntityMeta::EMPTY); - self.len += 1; AllocAtWithoutReplacement::DidNotExist } else if let Some(index) = self.pending.iter().position(|item| *item == entity.index()) { self.pending.swap_remove(index); let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; - self.len += 1; AllocAtWithoutReplacement::DidNotExist } else { let current_meta = &self.meta[entity.index() as usize]; @@ -790,7 +797,6 @@ impl Entities { let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; - self.len -= 1; Some(loc) } @@ -828,11 +834,10 @@ impl Entities { self.meta.clear(); self.pending.clear(); *self.free_cursor.get_mut() = 0; - self.len = 0; } /// Returns the location of an [`Entity`]. - /// Note: for pending entities, returns `Some(EntityLocation::INVALID)`. + /// Note: for pending entities, returns `None`. #[inline] pub fn get(&self, entity: Entity) -> Option { if let Some(meta) = self.meta.get(entity.index() as usize) { @@ -924,7 +929,6 @@ impl Entities { let old_meta_len = self.meta.len(); let new_meta_len = old_meta_len + -current_free_cursor as usize; self.meta.resize(new_meta_len, EntityMeta::EMPTY); - self.len += -current_free_cursor as u32; for (index, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) { init( Entity::from_raw_and_generation(index as u32, meta.generation), @@ -936,7 +940,6 @@ impl Entities { 0 }; - self.len += (self.pending.len() - new_free_cursor) as u32; for index in self.pending.drain(new_free_cursor..) { let meta = &mut self.meta[index as usize]; init( @@ -970,56 +973,96 @@ impl Entities { self.meta.len() } + /// The count of all entities in the [`World`] that are used, + /// including both those allocated and those reserved, but not those freed. + /// + /// [`World`]: crate::world::World + #[inline] + pub fn used_count(&self) -> usize { + (self.meta.len() as isize - self.free_cursor.load(Ordering::Relaxed) as isize) as usize + } + + /// The count of all entities in the [`World`] that have ever been allocated or reserved, including those that are freed. + /// This is the value that [`Self::total_count()`] would return if [`Self::flush()`] were called right now. + /// + /// [`World`]: crate::world::World + #[inline] + pub fn total_prospective_count(&self) -> usize { + self.meta.len() + (-self.free_cursor.load(Ordering::Relaxed)).min(0) as usize + } + /// The count of currently allocated entities. #[inline] pub fn len(&self) -> u32 { - self.len + // `pending`, by definition, can't be bigger than `meta`. + (self.meta.len() - self.pending.len()) as u32 } /// Checks if any entity is currently active. #[inline] pub fn is_empty(&self) -> bool { - self.len == 0 + self.len() == 0 } /// Sets the source code location from which this entity has last been spawned /// or despawned. 
- #[cfg(feature = "track_location")] #[inline] - pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: &'static Location) { - let meta = self - .meta - .get_mut(index as usize) - .expect("Entity index invalid"); - meta.spawned_or_despawned_by = Some(caller); + pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: MaybeLocation) { + caller.map(|caller| { + let meta = self + .meta + .get_mut(index as usize) + .expect("Entity index invalid"); + meta.spawned_or_despawned_by = MaybeLocation::new(Some(caller)); + }); } /// Returns the source code location from which this entity has last been spawned /// or despawned. Returns `None` if its index has been reused by another entity /// or if this entity has never existed. - #[cfg(feature = "track_location")] pub fn entity_get_spawned_or_despawned_by( &self, entity: Entity, - ) -> Option<&'static Location<'static>> { - self.meta - .get(entity.index() as usize) - .filter(|meta| + ) -> MaybeLocation>> { + MaybeLocation::new_with_flattened(|| { + self.meta + .get(entity.index() as usize) + .filter(|meta| // Generation is incremented immediately upon despawn (meta.generation == entity.generation) || (meta.location.archetype_id == ArchetypeId::INVALID) && (meta.generation == IdentifierMask::inc_masked_high_by(entity.generation, 1))) - .and_then(|meta| meta.spawned_or_despawned_by) + .map(|meta| meta.spawned_or_despawned_by) + }) + .map(Option::flatten) } - /// Constructs a message explaining why an entity does not exists, if known. + /// Constructs a message explaining why an entity does not exist, if known. pub(crate) fn entity_does_not_exist_error_details( &self, - _entity: Entity, + entity: Entity, ) -> EntityDoesNotExistDetails { EntityDoesNotExistDetails { - #[cfg(feature = "track_location")] - location: self.entity_get_spawned_or_despawned_by(_entity), + location: self.entity_get_spawned_or_despawned_by(entity), + } + } +} + +/// An error that occurs when a specified [`Entity`] does not exist. +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] +#[error("The entity with ID {entity} {details}")] +pub struct EntityDoesNotExistError { + /// The entity's ID. + pub entity: Entity, + /// Details on why the entity does not exist, if available. + pub details: EntityDoesNotExistDetails, +} + +impl EntityDoesNotExistError { + pub(crate) fn new(entity: Entity, entities: &Entities) -> Self { + Self { + entity, + details: entities.entity_does_not_exist_error_details(entity), } } } @@ -1028,26 +1071,22 @@ impl Entities { /// regarding an entity that did not exist. 
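// Reviewer aside (illustrative sketch only, not part of this patch): with
// `EntityDoesNotExistError` now public, a failed lookup can report the tracked
// spawn/despawn location. This assumes the `World::get_entity` family returns
// `Result<_, EntityDoesNotExistError>` in this release; adjust to the actual
// signature at your call site.
use bevy_ecs::{entity::Entity, world::World};

fn report_missing(world: &World, entity: Entity) {
    if let Err(err) = world.get_entity(entity) {
        // `err.details` carries the last spawn/despawn location when the
        // `track_location` feature is enabled, and a generic hint otherwise.
        eprintln!("entity lookup failed: {err}");
    }
}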
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct EntityDoesNotExistDetails { - #[cfg(feature = "track_location")] - location: Option<&'static Location<'static>>, + location: MaybeLocation>>, } impl fmt::Display for EntityDoesNotExistDetails { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - #[cfg(feature = "track_location")] - if let Some(location) = self.location { - write!(f, "was despawned by {location}") - } else { - write!( + match self.location.into_option() { + Some(Some(location)) => write!(f, "was despawned by {location}"), + Some(None) => write!( f, "does not exist (index has been reused or was never spawned)" - ) + ), + None => write!( + f, + "does not exist (enable `track_location` feature for more details)" + ), } - #[cfg(not(feature = "track_location"))] - write!( - f, - "does not exist (enable `track_location` feature for more details)" - ) } } @@ -1058,8 +1097,7 @@ struct EntityMeta { /// The current location of the [`Entity`] pub location: EntityLocation, /// Location of the last spawn or despawn of this entity - #[cfg(feature = "track_location")] - spawned_or_despawned_by: Option<&'static Location<'static>>, + spawned_or_despawned_by: MaybeLocation>>, } impl EntityMeta { @@ -1067,8 +1105,7 @@ impl EntityMeta { const EMPTY: EntityMeta = EntityMeta { generation: NonZero::::MIN, location: EntityLocation::INVALID, - #[cfg(feature = "track_location")] - spawned_or_despawned_by: None, + spawned_or_despawned_by: MaybeLocation::new(None), }; } diff --git a/crates/bevy_ecs/src/entity/unique_array.rs b/crates/bevy_ecs/src/entity/unique_array.rs new file mode 100644 index 0000000000..ce31e55448 --- /dev/null +++ b/crates/bevy_ecs/src/entity/unique_array.rs @@ -0,0 +1,587 @@ +//! A wrapper around entity arrays with a uniqueness invariant. + +use core::{ + array, + borrow::{Borrow, BorrowMut}, + fmt::Debug, + ops::{ + Bound, Deref, DerefMut, Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, + RangeTo, RangeToInclusive, + }, + ptr, +}; + +use alloc::{ + boxed::Box, + collections::{BTreeSet, BinaryHeap, LinkedList, VecDeque}, + rc::Rc, + vec::Vec, +}; + +use bevy_platform::sync::Arc; + +use super::{ + unique_slice::{self, UniqueEntityEquivalentSlice}, + Entity, EntityEquivalent, UniqueEntityIter, +}; + +/// An array that contains only unique entities. +/// +/// It can be obtained through certain methods on [`UniqueEntityEquivalentSlice`], +/// and some [`TryFrom`] implementations. +/// +/// When `T` is [`Entity`], use [`UniqueEntityArray`]. +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct UniqueEntityEquivalentArray([T; N]); + +/// An array that contains only unique [`Entity`]. +/// +/// This is the default case of a [`UniqueEntityEquivalentArray`]. +pub type UniqueEntityArray = UniqueEntityEquivalentArray; + +impl UniqueEntityEquivalentArray { + /// Constructs a `UniqueEntityEquivalentArray` from a [`[T; N]`] unsafely. + /// + /// # Safety + /// + /// `array` must contain only unique elements. + pub const unsafe fn from_array_unchecked(array: [T; N]) -> Self { + Self(array) + } + + /// Constructs a `&UniqueEntityEquivalentArray` from a [`&[T; N]`] unsafely. + /// + /// # Safety + /// + /// `array` must contain only unique elements. + pub const unsafe fn from_array_ref_unchecked(array: &[T; N]) -> &Self { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { &*(ptr::from_ref(array).cast()) } + } + + /// Constructs a `Box` from a [`Box<[T; N]>`] unsafely. 
+ /// + /// # Safety + /// + /// `array` must contain only unique elements. + pub unsafe fn from_boxed_array_unchecked(array: Box<[T; N]>) -> Box { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Box::from_raw(Box::into_raw(array).cast()) } + } + + /// Casts `self` into the inner array. + pub fn into_boxed_inner(self: Box) -> Box<[T; N]> { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Box::from_raw(Box::into_raw(self).cast()) } + } + + /// Constructs a `Arc` from a [`Arc<[T; N]>`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub unsafe fn from_arc_array_unchecked(slice: Arc<[T; N]>) -> Arc { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Arc::from_raw(Arc::into_raw(slice).cast()) } + } + + /// Casts `self` to the inner array. + pub fn into_arc_inner(this: Arc) -> Arc<[T; N]> { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Arc::from_raw(Arc::into_raw(this).cast()) } + } + + // Constructs a `Rc` from a [`Rc<[T; N]>`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub unsafe fn from_rc_array_unchecked(slice: Rc<[T; N]>) -> Rc { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Rc::from_raw(Rc::into_raw(slice).cast()) } + } + + /// Casts `self` to the inner array. + pub fn into_rc_inner(self: Rc) -> Rc<[T; N]> { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Rc::from_raw(Rc::into_raw(self).cast()) } + } + + /// Return the inner array. + pub fn into_inner(self) -> [T; N] { + self.0 + } + + /// Returns a reference to the inner array. + pub fn as_inner(&self) -> &[T; N] { + &self.0 + } + + /// Returns a slice containing the entire array. Equivalent to `&s[..]`. + pub const fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice containing the entire array. Equivalent to + /// `&mut s[..]`. + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.as_mut_slice()) } + } + + /// Borrows each element and returns an array of references with the same + /// size as `self`. + /// + /// Equivalent to [`[T; N]::as_ref`](array::each_ref). + pub fn each_ref(&self) -> UniqueEntityEquivalentArray<&T, N> { + UniqueEntityEquivalentArray(self.0.each_ref()) + } +} + +impl Deref for UniqueEntityEquivalentArray { + type Target = UniqueEntityEquivalentSlice; + + fn deref(&self) -> &Self::Target { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(&self.0) } + } +} + +impl DerefMut for UniqueEntityEquivalentArray { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: All elements in the original array are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(&mut self.0) } + } +} +impl Default for UniqueEntityEquivalentArray { + fn default() -> Self { + Self(Default::default()) + } +} + +impl<'a, T: EntityEquivalent, const N: usize> IntoIterator + for &'a UniqueEntityEquivalentArray +{ + type Item = &'a T; + + type IntoIter = unique_slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.iter()) } + } +} + +impl IntoIterator for UniqueEntityEquivalentArray { + type Item = T; + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.into_iter()) } + } +} + +impl AsRef> + for UniqueEntityEquivalentArray +{ + fn as_ref(&self) -> &UniqueEntityEquivalentSlice { + self + } +} + +impl AsMut> + for UniqueEntityEquivalentArray +{ + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { + self + } +} + +impl Borrow> + for UniqueEntityEquivalentArray +{ + fn borrow(&self) -> &UniqueEntityEquivalentSlice { + self + } +} + +impl BorrowMut> + for UniqueEntityEquivalentArray +{ + fn borrow_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { + self + } +} + +impl Index<(Bound, Bound)> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: (Bound, Bound)) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: Range) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeFrom) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentArray { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeFull) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeInclusive) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeTo) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeToInclusive) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentArray { + type Output = T; + fn index(&self, key: usize) -> &T { + self.0.index(key) + } +} + +impl IndexMut<(Bound, Bound)> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: Range) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeFrom) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeFull) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeTo) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl From<&[T; 1]> for UniqueEntityEquivalentArray { + fn from(value: &[T; 1]) -> Self { + Self(value.clone()) + } +} + +impl From<&[T; 0]> for UniqueEntityEquivalentArray { + fn from(value: &[T; 0]) -> Self { + Self(value.clone()) + } +} + +impl From<&mut [T; 1]> for UniqueEntityEquivalentArray { + fn from(value: &mut [T; 1]) -> Self { + Self(value.clone()) + } +} + +impl From<&mut [T; 0]> for UniqueEntityEquivalentArray { + fn from(value: &mut [T; 0]) -> Self { + Self(value.clone()) + } +} + +impl From<[T; 1]> for UniqueEntityEquivalentArray { + fn from(value: [T; 1]) -> Self { + Self(value) + } +} + +impl From<[T; 0]> for UniqueEntityEquivalentArray { + fn from(value: [T; 0]) -> Self { + Self(value) + } +} + +impl From> for (T,) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> + for (T, T, T, T, T, T, T, T, T, T) +{ + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> + for (T, T, T, T, T, T, T, T, T, T, T) +{ + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> + for (T, T, T, T, T, T, T, T, T, T, T, T) +{ + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> + for BTreeSet +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + BTreeSet::from(value.0) + } +} + +impl From> + for BinaryHeap +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + BinaryHeap::from(value.0) + } +} + +impl From> + for LinkedList +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + LinkedList::from(value.0) + } +} + +impl From> for Vec { + fn from(value: UniqueEntityEquivalentArray) -> Self { + Vec::from(value.0) + } +} + +impl From> for VecDeque { + fn from(value: UniqueEntityEquivalentArray) -> Self { + VecDeque::from(value.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentSlice> for UniqueEntityEquivalentArray +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { + self.0.eq(&other.as_inner()) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for UniqueEntityEquivalentArray +{ + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { + self.0.eq(other.as_inner()) + } +} + +impl, U: EntityEquivalent, const N: usize> + 
PartialEq<&UniqueEntityEquivalentArray> for Vec +{ + fn eq(&self, other: &&UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentArray> for VecDeque +{ + fn eq(&self, other: &&UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&mut UniqueEntityEquivalentArray> for VecDeque +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for Vec +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} +impl, U: EntityEquivalent, const N: usize> + PartialEq> for VecDeque +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} + +/// A by-value array iterator. +/// +/// Equivalent to [`array::IntoIter`]. +pub type IntoIter = UniqueEntityIter>; + +impl UniqueEntityIter> { + /// Returns an immutable slice of all elements that have not been yielded + /// yet. + /// + /// Equivalent to [`array::IntoIter::as_slice`]. + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } + } + + /// Returns a mutable slice of all elements that have not been yielded yet. + /// + /// Equivalent to [`array::IntoIter::as_mut_slice`]. + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut( + self.as_mut_inner().as_mut_slice(), + ) + } + } +} diff --git a/crates/bevy_ecs/src/entity/unique_slice.rs b/crates/bevy_ecs/src/entity/unique_slice.rs index d9f01277dc..e45c3a21c0 100644 --- a/crates/bevy_ecs/src/entity/unique_slice.rs +++ b/crates/bevy_ecs/src/entity/unique_slice.rs @@ -1,7 +1,11 @@ +//! A wrapper around entity slices with a uniqueness invariant. + use core::{ + array::TryFromSliceError, borrow::Borrow, cmp::Ordering, fmt::Debug, + iter::FusedIterator, ops::{ Bound, Deref, Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, @@ -15,40 +19,49 @@ use alloc::{ boxed::Box, collections::VecDeque, rc::Rc, - sync::Arc, vec::Vec, }; +use bevy_platform::sync::Arc; + use super::{ - unique_vec, EntitySet, EntitySetIterator, FromEntitySetIterator, TrustedEntityBorrow, - UniqueEntityIter, UniqueEntityVec, + unique_vec::{self, UniqueEntityEquivalentVec}, + Entity, EntityEquivalent, EntitySet, EntitySetIterator, FromEntitySetIterator, + UniqueEntityEquivalentArray, UniqueEntityIter, }; /// A slice that contains only unique entities. /// -/// It can be obtained by slicing [`UniqueEntityVec`]. +/// This can be obtained by slicing [`UniqueEntityEquivalentVec`]. +/// +/// When `T` is [`Entity`], use [`UniqueEntitySlice`]. #[repr(transparent)] #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct UniqueEntitySlice([T]); +pub struct UniqueEntityEquivalentSlice([T]); -impl UniqueEntitySlice { - /// Constructs a `UniqueEntitySlice` from a [`&[T]`] unsafely. +/// A slice that contains only unique [`Entity`]. +/// +/// This is the default case of a [`UniqueEntityEquivalentSlice`]. +pub type UniqueEntitySlice = UniqueEntityEquivalentSlice; + +impl UniqueEntityEquivalentSlice { + /// Constructs a `UniqueEntityEquivalentSlice` from a [`&[T]`] unsafely. 
/// /// # Safety /// /// `slice` must contain only unique elements. pub const unsafe fn from_slice_unchecked(slice: &[T]) -> &Self { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. unsafe { &*(ptr::from_ref(slice) as *const Self) } } - /// Constructs a `UniqueEntitySlice` from a [`&mut [T]`] unsafely. + /// Constructs a `UniqueEntityEquivalentSlice` from a [`&mut [T]`] unsafely. /// /// # Safety /// /// `slice` must contain only unique elements. pub const unsafe fn from_slice_unchecked_mut(slice: &mut [T]) -> &mut Self { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. unsafe { &mut *(ptr::from_mut(slice) as *mut Self) } } @@ -57,51 +70,51 @@ impl UniqueEntitySlice { &self.0 } - /// Constructs a `UniqueEntitySlice` from a [`Box<[T]>`] unsafely. + /// Constructs a `UniqueEntityEquivalentSlice` from a [`Box<[T]>`] unsafely. /// /// # Safety /// /// `slice` must contain only unique elements. pub unsafe fn from_boxed_slice_unchecked(slice: Box<[T]>) -> Box { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. unsafe { Box::from_raw(Box::into_raw(slice) as *mut Self) } } /// Casts `self` to the inner slice. pub fn into_boxed_inner(self: Box) -> Box<[T]> { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. unsafe { Box::from_raw(Box::into_raw(self) as *mut [T]) } } - /// Constructs a `UniqueEntitySlice` from a [`Arc<[T]>`] unsafely. + /// Constructs a `UniqueEntityEquivalentSlice` from a [`Arc<[T]>`] unsafely. /// /// # Safety /// /// `slice` must contain only unique elements. pub unsafe fn from_arc_slice_unchecked(slice: Arc<[T]>) -> Arc { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. unsafe { Arc::from_raw(Arc::into_raw(slice) as *mut Self) } } /// Casts `self` to the inner slice. - pub fn into_arc_inner(self: Arc) -> Arc<[T]> { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. - unsafe { Arc::from_raw(Arc::into_raw(self) as *mut [T]) } + pub fn into_arc_inner(this: Arc) -> Arc<[T]> { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { Arc::from_raw(Arc::into_raw(this) as *mut [T]) } } - // Constructs a `UniqueEntitySlice` from a [`Rc<[T]>`] unsafely. + // Constructs a `UniqueEntityEquivalentSlice` from a [`Rc<[T]>`] unsafely. /// /// # Safety /// /// `slice` must contain only unique elements. pub unsafe fn from_rc_slice_unchecked(slice: Rc<[T]>) -> Rc { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. unsafe { Rc::from_raw(Rc::into_raw(slice) as *mut Self) } } /// Casts `self` to the inner slice. pub fn into_rc_inner(self: Rc) -> Rc<[T]> { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. unsafe { Rc::from_raw(Rc::into_raw(self) as *mut [T]) } } @@ -127,6 +140,70 @@ impl UniqueEntitySlice { Some((last, unsafe { Self::from_slice_unchecked(rest) })) } + /// Returns an array reference to the first `N` items in the slice. 
+ /// + /// Equivalent to [`[T]::first_chunk`](slice::first_chunk). + pub const fn first_chunk(&self) -> Option<&UniqueEntityEquivalentArray> { + let Some(chunk) = self.0.first_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + Some(unsafe { UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk) }) + } + + /// Returns an array reference to the first `N` items in the slice and the remaining slice. + /// + /// Equivalent to [`[T]::split_first_chunk`](slice::split_first_chunk). + pub const fn split_first_chunk( + &self, + ) -> Option<( + &UniqueEntityEquivalentArray, + &UniqueEntityEquivalentSlice, + )> { + let Some((chunk, rest)) = self.0.split_first_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + unsafe { + Some(( + UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk), + Self::from_slice_unchecked(rest), + )) + } + } + + /// Returns an array reference to the last `N` items in the slice and the remaining slice. + /// + /// Equivalent to [`[T]::split_last_chunk`](slice::split_last_chunk). + pub const fn split_last_chunk( + &self, + ) -> Option<( + &UniqueEntityEquivalentSlice, + &UniqueEntityEquivalentArray, + )> { + let Some((rest, chunk)) = self.0.split_last_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + unsafe { + Some(( + Self::from_slice_unchecked(rest), + UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk), + )) + } + } + + /// Returns an array reference to the last `N` items in the slice. + /// + /// Equivalent to [`[T]::last_chunk`](slice::last_chunk). + pub const fn last_chunk(&self) -> Option<&UniqueEntityEquivalentArray> { + let Some(chunk) = self.0.last_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + Some(unsafe { UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk) }) + } + /// Returns a reference to a subslice. /// /// Equivalent to the range functionality of [`[T]::get`]. @@ -148,7 +225,7 @@ impl UniqueEntitySlice { /// /// Equivalent to the range functionality of [`[T]::get_mut`]. /// - /// Note that `UniqueEntitySlice::get_mut` cannot be called with a [`usize`]. + /// Note that `UniqueEntityEquivalentSlice::get_mut` cannot be called with a [`usize`]. /// /// [`[T]::get_mut`]: `slice::get_mut`s pub fn get_mut(&mut self, index: I) -> Option<&mut Self> @@ -184,7 +261,7 @@ impl UniqueEntitySlice { /// /// Equivalent to the range functionality of [`[T]::get_unchecked_mut`]. /// - /// Note that `UniqueEntitySlice::get_unchecked_mut` cannot be called with an index. + /// Note that `UniqueEntityEquivalentSlice::get_unchecked_mut` cannot be called with an index. /// /// # Safety /// @@ -226,9 +303,175 @@ impl UniqueEntitySlice { unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.iter()) } } + /// Returns an iterator over all contiguous windows of length + /// `size`. + /// + /// Equivalent to [`[T]::windows`]. + /// + /// [`[T]::windows`]: `slice::windows` + pub fn windows(&self, size: usize) -> Windows<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.windows(size)) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// Equivalent to [`[T]::chunks`]. 
+ /// + /// [`[T]::chunks`]: `slice::chunks` + pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.chunks(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// Equivalent to [`[T]::chunks_mut`]. + /// + /// [`[T]::chunks_mut`]: `slice::chunks_mut` + pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.chunks_mut(chunk_size), + ) + } + } + + /// + /// + /// Equivalent to [`[T]::chunks_exact`]. + /// + /// [`[T]::chunks_exact`]: `slice::chunks_exact` + pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.chunks_exact(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// Equivalent to [`[T]::chunks_exact_mut`]. + /// + /// [`[T]::chunks_exact_mut`]: `slice::chunks_exact_mut` + pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.chunks_exact_mut(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// Equivalent to [`[T]::rchunks`]. + /// + /// [`[T]::rchunks`]: `slice::rchunks` + pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.rchunks(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// Equivalent to [`[T]::rchunks_mut`]. + /// + /// [`[T]::rchunks_mut`]: `slice::rchunks_mut` + pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rchunks_mut(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// end of the slice. + /// + /// Equivalent to [`[T]::rchunks_exact`]. + /// + /// [`[T]::rchunks_exact`]: `slice::rchunks_exact` + pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.rchunks_exact(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// Equivalent to [`[T]::rchunks_exact_mut`]. + /// + /// [`[T]::rchunks_exact_mut`]: `slice::rchunks_exact_mut` + pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. 
+ unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rchunks_exact_mut(chunk_size), + ) + } + } + + /// Returns an iterator over the slice producing non-overlapping runs + /// of elements using the predicate to separate them. + /// + /// Equivalent to [`[T]::chunk_by`]. + /// + /// [`[T]::chunk_by`]: `slice::chunk_by` + pub fn chunk_by(&self, pred: F) -> ChunkBy<'_, F, T> + where + F: FnMut(&T, &T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.chunk_by(pred)) + } + } + + /// Returns an iterator over the slice producing non-overlapping mutable + /// runs of elements using the predicate to separate them. + /// + /// Equivalent to [`[T]::chunk_by_mut`]. + /// + /// [`[T]::chunk_by_mut`]: `slice::chunk_by_mut` + pub fn chunk_by_mut(&mut self, pred: F) -> ChunkByMut<'_, F, T> + where + F: FnMut(&T, &T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.chunk_by_mut(pred), + ) + } + } + /// Divides one slice into two at an index. /// - /// Equivalent to [`[T]::split_at`](slice::split_at) + /// Equivalent to [`[T]::split_at`](slice::split_at). pub const fn split_at(&self, mid: usize) -> (&Self, &Self) { let (left, right) = self.0.split_at(mid); // SAFETY: All elements in the original slice are unique. @@ -242,7 +485,7 @@ impl UniqueEntitySlice { /// Divides one mutable slice into two at an index. /// - /// Equivalent to [`[T]::split_at_mut`](slice::split_at_mut) + /// Equivalent to [`[T]::split_at_mut`](slice::split_at_mut). pub const fn split_at_mut(&mut self, mid: usize) -> (&mut Self, &mut Self) { let (left, right) = self.0.split_at_mut(mid); // SAFETY: All elements in the original slice are unique. @@ -256,7 +499,7 @@ impl UniqueEntitySlice { /// Divides one slice into two at an index, without doing bounds checking. /// - /// Equivalent to [`[T]::split_at_unchecked`](slice::split_at_unchecked) + /// Equivalent to [`[T]::split_at_unchecked`](slice::split_at_unchecked). /// /// # Safety /// @@ -330,6 +573,179 @@ impl UniqueEntitySlice { } } + /// Returns an iterator over subslices separated by elements that match + /// `pred`. + /// + /// Equivalent to [`[T]::split`]. + /// + /// [`[T]::split`]: `slice::split` + pub fn split(&self, pred: F) -> Split<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.split(pred)) + } + } + + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`. + /// + /// Equivalent to [`[T]::split_mut`]. + /// + /// [`[T]::split_mut`]: `slice::split_mut` + pub fn split_mut(&mut self, pred: F) -> SplitMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.split_mut(pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`. + /// + /// Equivalent to [`[T]::split_inclusive`]. + /// + /// [`[T]::split_inclusive`]: `slice::split_inclusive` + pub fn split_inclusive(&self, pred: F) -> SplitInclusive<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. 
+ unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.split_inclusive(pred), + ) + } + } + + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`. + /// + /// Equivalent to [`[T]::split_inclusive_mut`]. + /// + /// [`[T]::split_inclusive_mut`]: `slice::split_inclusive_mut` + pub fn split_inclusive_mut(&mut self, pred: F) -> SplitInclusiveMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.split_inclusive_mut(pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`, starting at the end of the slice and working backwards. + /// + /// Equivalent to [`[T]::rsplit`]. + /// + /// [`[T]::rsplit`]: `slice::rsplit` + pub fn rsplit(&self, pred: F) -> RSplit<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.rsplit(pred)) + } + } + + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`, starting at the end of the slice and working + /// backwards. + /// + /// Equivalent to [`[T]::rsplit_mut`]. + /// + /// [`[T]::rsplit_mut`]: `slice::rsplit_mut` + pub fn rsplit_mut(&mut self, pred: F) -> RSplitMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rsplit_mut(pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`, limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::splitn`]. + /// + /// [`[T]::splitn`]: `slice::splitn` + pub fn splitn(&self, n: usize, pred: F) -> SplitN<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.splitn(n, pred)) + } + } + + /// Returns an iterator over mutable subslices separated by elements that match + /// `pred`, limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::splitn_mut`]. + /// + /// [`[T]::splitn_mut`]: `slice::splitn_mut` + pub fn splitn_mut(&mut self, n: usize, pred: F) -> SplitNMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.splitn_mut(n, pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred` limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::rsplitn`]. + /// + /// [`[T]::rsplitn`]: `slice::rsplitn` + pub fn rsplitn(&self, n: usize, pred: F) -> RSplitN<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.rsplitn(n, pred)) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred` limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::rsplitn_mut`]. 
+ /// + /// [`[T]::rsplitn_mut`]: `slice::rsplitn_mut` + pub fn rsplitn_mut(&mut self, n: usize, pred: F) -> RSplitNMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rsplitn_mut(n, pred), + ) + } + } + /// Sorts the slice **without** preserving the initial order of equal elements. /// /// Equivalent to [`[T]::sort_unstable`](slice::sort_unstable). @@ -423,40 +839,40 @@ impl UniqueEntitySlice { self.0.sort_by_cached_key(f); } - /// Copies self into a new `UniqueEntityVec`. - pub fn to_vec(&self) -> UniqueEntityVec + /// Copies self into a new `UniqueEntityEquivalentVec`. + pub fn to_vec(&self) -> UniqueEntityEquivalentVec where T: Clone, { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntityVec::from_vec_unchecked(self.0.to_vec()) } + unsafe { UniqueEntityEquivalentVec::from_vec_unchecked(self.0.to_vec()) } } /// Converts `self` into a vector without clones or allocation. /// /// Equivalent to [`[T]::into_vec`](slice::into_vec). - pub fn into_vec(self: Box) -> UniqueEntityVec { + pub fn into_vec(self: Box) -> UniqueEntityEquivalentVec { // SAFETY: // This matches the implementation of `slice::into_vec`. // All elements in the original slice are unique. unsafe { let len = self.len(); let vec = Vec::from_raw_parts(Box::into_raw(self).cast::(), len, len); - UniqueEntityVec::from_vec_unchecked(vec) + UniqueEntityEquivalentVec::from_vec_unchecked(vec) } } } /// Converts a reference to T into a slice of length 1 (without copying). -pub const fn from_ref(s: &T) -> &UniqueEntitySlice { +pub const fn from_ref(s: &T) -> &UniqueEntityEquivalentSlice { // SAFETY: A slice with a length of 1 is always unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(slice::from_ref(s)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice::from_ref(s)) } } /// Converts a reference to T into a slice of length 1 (without copying). -pub const fn from_mut(s: &mut T) -> &mut UniqueEntitySlice { +pub const fn from_mut(s: &mut T) -> &mut UniqueEntityEquivalentSlice { // SAFETY: A slice with a length of 1 is always unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(slice::from_mut(s)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice::from_mut(s)) } } /// Forms a slice from a pointer and a length. @@ -467,12 +883,12 @@ pub const fn from_mut(s: &mut T) -> &mut UniqueEntitySli /// /// [`slice::from_raw_parts`] must be safe to call with `data` and `len`. /// Additionally, all elements in the resulting slice must be unique. -pub const unsafe fn from_raw_parts<'a, T: TrustedEntityBorrow>( +pub const unsafe fn from_raw_parts<'a, T: EntityEquivalent>( data: *const T, len: usize, -) -> &'a UniqueEntitySlice { +) -> &'a UniqueEntityEquivalentSlice { // SAFETY: The safety contract is upheld by the caller. - unsafe { UniqueEntitySlice::from_slice_unchecked(slice::from_raw_parts(data, len)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice::from_raw_parts(data, len)) } } /// Performs the same functionality as [`from_raw_parts`], except that a mutable slice is returned. @@ -483,15 +899,53 @@ pub const unsafe fn from_raw_parts<'a, T: TrustedEntityBorrow>( /// /// [`slice::from_raw_parts_mut`] must be safe to call with `data` and `len`. /// Additionally, all elements in the resulting slice must be unique. 
-pub const unsafe fn from_raw_parts_mut<'a, T: TrustedEntityBorrow>( +pub const unsafe fn from_raw_parts_mut<'a, T: EntityEquivalent>( data: *mut T, len: usize, -) -> &'a mut UniqueEntitySlice { +) -> &'a mut UniqueEntityEquivalentSlice { // SAFETY: The safety contract is upheld by the caller. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(slice::from_raw_parts_mut(data, len)) } + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice::from_raw_parts_mut(data, len)) + } } -impl<'a, T: TrustedEntityBorrow> IntoIterator for &'a UniqueEntitySlice { +/// Casts a slice of entity slices to a slice of [`UniqueEntityEquivalentSlice`]s. +/// +/// # Safety +/// +/// All elements in each of the casted slices must be unique. +pub unsafe fn cast_slice_of_unique_entity_slice<'a, 'b, T: EntityEquivalent + 'a>( + slice: &'b [&'a [T]], +) -> &'b [&'a UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { &*(ptr::from_ref(slice) as *const [&UniqueEntityEquivalentSlice]) } +} + +/// Casts a mutable slice of entity slices to a slice of [`UniqueEntityEquivalentSlice`]s. +/// +/// # Safety +/// +/// All elements in each of the casted slices must be unique. +pub unsafe fn cast_slice_of_unique_entity_slice_mut<'a, 'b, T: EntityEquivalent + 'a>( + slice: &'b mut [&'a [T]], +) -> &'b mut [&'a UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { &mut *(ptr::from_mut(slice) as *mut [&UniqueEntityEquivalentSlice]) } +} + +/// Casts a mutable slice of mutable entity slices to a slice of mutable [`UniqueEntityEquivalentSlice`]s. +/// +/// # Safety +/// +/// All elements in each of the casted slices must be unique. +pub unsafe fn cast_slice_of_mut_unique_entity_slice_mut<'a, 'b, T: EntityEquivalent + 'a>( + slice: &'b mut [&'a mut [T]], +) -> &'b mut [&'a mut UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. 
+ unsafe { &mut *(ptr::from_mut(slice) as *mut [&mut UniqueEntityEquivalentSlice]) } +} + +impl<'a, T: EntityEquivalent> IntoIterator for &'a UniqueEntityEquivalentSlice { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -501,7 +955,7 @@ impl<'a, T: TrustedEntityBorrow> IntoIterator for &'a UniqueEntitySlice { } } -impl<'a, T: TrustedEntityBorrow> IntoIterator for &'a Box> { +impl<'a, T: EntityEquivalent> IntoIterator for &'a Box> { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -511,7 +965,7 @@ impl<'a, T: TrustedEntityBorrow> IntoIterator for &'a Box> } } -impl IntoIterator for Box> { +impl IntoIterator for Box> { type Item = T; type IntoIter = unique_vec::IntoIter; @@ -521,7 +975,7 @@ impl IntoIterator for Box> { } } -impl Deref for UniqueEntitySlice { +impl Deref for UniqueEntityEquivalentSlice { type Target = [T]; fn deref(&self) -> &Self::Target { @@ -529,90 +983,107 @@ impl Deref for UniqueEntitySlice { } } -impl AsRef<[T]> for UniqueEntitySlice { +impl AsRef<[T]> for UniqueEntityEquivalentSlice { fn as_ref(&self) -> &[T] { self } } -impl AsRef for UniqueEntitySlice { +impl AsRef for UniqueEntityEquivalentSlice { fn as_ref(&self) -> &Self { self } } -impl AsMut for UniqueEntitySlice { +impl AsMut for UniqueEntityEquivalentSlice { fn as_mut(&mut self) -> &mut Self { self } } -impl Borrow<[T]> for UniqueEntitySlice { +impl Borrow<[T]> for UniqueEntityEquivalentSlice { fn borrow(&self) -> &[T] { self } } -impl Clone for Box> { +impl Clone for Box> { fn clone(&self) -> Self { self.to_vec().into_boxed_slice() } } -impl Default for &UniqueEntitySlice { +impl Default for &UniqueEntityEquivalentSlice { fn default() -> Self { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(Default::default()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(Default::default()) } } } -impl Default for &mut UniqueEntitySlice { +impl Default for &mut UniqueEntityEquivalentSlice { fn default() -> Self { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(Default::default()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(Default::default()) } } } -impl Default for Box> { +impl Default for Box> { fn default() -> Self { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_boxed_slice_unchecked(Default::default()) } + unsafe { UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(Default::default()) } } } -impl From<&UniqueEntitySlice> for Box> { - fn from(value: &UniqueEntitySlice) -> Self { - // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_boxed_slice_unchecked(value.0.into()) } - } -} - -impl From<&UniqueEntitySlice> for Arc> { - fn from(value: &UniqueEntitySlice) -> Self { - // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_arc_slice_unchecked(value.0.into()) } - } -} - -impl From<&UniqueEntitySlice> for Rc> { - fn from(value: &UniqueEntitySlice) -> Self { - // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_rc_slice_unchecked(value.0.into()) } - } -} - -impl<'a, T: TrustedEntityBorrow + Clone> From<&'a UniqueEntitySlice> - for Cow<'a, UniqueEntitySlice> +impl From<&UniqueEntityEquivalentSlice> + for Box> { - fn from(value: &'a UniqueEntitySlice) -> Self { + fn from(value: &UniqueEntityEquivalentSlice) -> Self { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(value.0.into()) } + } +} + +impl From<&UniqueEntityEquivalentSlice> + for Arc> +{ + fn from(value: &UniqueEntityEquivalentSlice) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_arc_slice_unchecked(value.0.into()) } + } +} + +impl From<&UniqueEntityEquivalentSlice> + for Rc> +{ + fn from(value: &UniqueEntityEquivalentSlice) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_rc_slice_unchecked(value.0.into()) } + } +} + +impl<'a, T: EntityEquivalent + Clone> From<&'a UniqueEntityEquivalentSlice> + for Cow<'a, UniqueEntityEquivalentSlice> +{ + fn from(value: &'a UniqueEntityEquivalentSlice) -> Self { Cow::Borrowed(value) } } -impl<'a, T: TrustedEntityBorrow + Clone> From>> - for Box> +impl From> + for Box> { - fn from(value: Cow<'a, UniqueEntitySlice>) -> Self { + fn from(value: UniqueEntityEquivalentArray) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { + UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(Box::new(value.into_inner())) + } + } +} + +impl<'a, T: EntityEquivalent + Clone> From>> + for Box> +{ + fn from(value: Cow<'a, UniqueEntityEquivalentSlice>) -> Self { match value { Cow::Borrowed(slice) => Box::from(slice), Cow::Owned(slice) => Box::from(slice), @@ -620,208 +1091,274 @@ impl<'a, T: TrustedEntityBorrow + Clone> From>> } } -impl From> for Box> { - fn from(value: UniqueEntityVec) -> Self { +impl From> + for Box> +{ + fn from(value: UniqueEntityEquivalentVec) -> Self { value.into_boxed_slice() } } -impl FromIterator for Box> { +impl FromIterator for Box> { fn from_iter>(iter: I) -> Self { iter.into_iter() - .collect::>() + .collect::>() .into_boxed_slice() } } -impl FromEntitySetIterator for Box> { +impl FromEntitySetIterator for Box> { fn from_entity_set_iter>(iter: I) -> Self { iter.into_iter() - .collect_set::>() + .collect_set::>() .into_boxed_slice() } } -impl, U: TrustedEntityBorrow> PartialEq> - for &UniqueEntitySlice +impl, U: EntityEquivalent> + PartialEq> for &UniqueEntityEquivalentSlice { - fn eq(&self, other: &UniqueEntityVec) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { self.0.eq(other.as_vec()) } } -impl, U: TrustedEntityBorrow> PartialEq> - for &mut UniqueEntitySlice +impl, U: EntityEquivalent> + PartialEq> for &mut UniqueEntityEquivalentSlice { - fn eq(&self, other: &UniqueEntityVec) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { self.0.eq(other.as_vec()) } } -impl, U: TrustedEntityBorrow> PartialEq> - for UniqueEntitySlice +impl, U: EntityEquivalent> + PartialEq> for UniqueEntityEquivalentSlice { - fn eq(&self, other: &UniqueEntityVec) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { self.0.eq(other.as_vec()) } } -impl, U: TrustedEntityBorrow, const N: usize> PartialEq<&UniqueEntitySlice> - for [T; N] +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentSlice> for [T; N] { - fn eq(&self, other: &&UniqueEntitySlice) -> bool { + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { self.eq(&other.0) } } -impl + Clone, U: TrustedEntityBorrow> PartialEq<&UniqueEntitySlice> +impl + Clone, U: EntityEquivalent> PartialEq<&UniqueEntityEquivalentSlice> for Cow<'_, [T]> { - fn eq(&self, other: &&UniqueEntitySlice) -> bool { + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { self.eq(&&other.0) } } -impl + Clone, U: 
TrustedEntityBorrow> - PartialEq<&UniqueEntitySlice> for Cow<'_, UniqueEntitySlice> +impl + Clone, U: EntityEquivalent> + PartialEq<&UniqueEntityEquivalentSlice> for Cow<'_, UniqueEntityEquivalentSlice> { - fn eq(&self, other: &&UniqueEntitySlice) -> bool { + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { self.0.eq(&other.0) } } -impl, U: TrustedEntityBorrow> PartialEq<&UniqueEntitySlice> for Vec { - fn eq(&self, other: &&UniqueEntitySlice) -> bool { +impl, U: EntityEquivalent> PartialEq<&UniqueEntityEquivalentSlice> for Vec { + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { self.eq(&other.0) } } -impl, U: TrustedEntityBorrow> PartialEq<&UniqueEntitySlice> for VecDeque { - fn eq(&self, other: &&UniqueEntitySlice) -> bool { +impl, U: EntityEquivalent> PartialEq<&UniqueEntityEquivalentSlice> + for VecDeque +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { self.eq(&&other.0) } } -impl, U: TrustedEntityBorrow, const N: usize> PartialEq<&mut UniqueEntitySlice> - for [T; N] +impl, U: EntityEquivalent, const N: usize> + PartialEq<&mut UniqueEntityEquivalentSlice> for [T; N] { - fn eq(&self, other: &&mut UniqueEntitySlice) -> bool { + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { self.eq(&other.0) } } -impl + Clone, U: TrustedEntityBorrow> PartialEq<&mut UniqueEntitySlice> +impl + Clone, U: EntityEquivalent> PartialEq<&mut UniqueEntityEquivalentSlice> for Cow<'_, [T]> { - fn eq(&self, other: &&mut UniqueEntitySlice) -> bool { + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { self.eq(&&**other) } } -impl + Clone, U: TrustedEntityBorrow> - PartialEq<&mut UniqueEntitySlice> for Cow<'_, UniqueEntitySlice> +impl + Clone, U: EntityEquivalent> + PartialEq<&mut UniqueEntityEquivalentSlice> for Cow<'_, UniqueEntityEquivalentSlice> { - fn eq(&self, other: &&mut UniqueEntitySlice) -> bool { + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { self.0.eq(&other.0) } } -impl + Clone, U: TrustedEntityBorrow> - PartialEq> for Cow<'_, UniqueEntitySlice> +impl + Clone, U: EntityEquivalent> + PartialEq> for Cow<'_, UniqueEntityEquivalentSlice> { - fn eq(&self, other: &UniqueEntityVec) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { self.0.eq(other.as_vec()) } } -impl, U: TrustedEntityBorrow> PartialEq<&mut UniqueEntitySlice> for Vec { - fn eq(&self, other: &&mut UniqueEntitySlice) -> bool { +impl, U: EntityEquivalent> PartialEq<&mut UniqueEntityEquivalentSlice> + for Vec +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { self.eq(&other.0) } } -impl, U: TrustedEntityBorrow> PartialEq<&mut UniqueEntitySlice> for VecDeque { - fn eq(&self, other: &&mut UniqueEntitySlice) -> bool { +impl, U: EntityEquivalent> PartialEq<&mut UniqueEntityEquivalentSlice> + for VecDeque +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { self.eq(&&other.0) } } -impl, U: TrustedEntityBorrow> PartialEq> - for [T] +impl, U: EntityEquivalent> + PartialEq> for [T] { - fn eq(&self, other: &UniqueEntitySlice) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { self.eq(&other.0) } } -impl, U: TrustedEntityBorrow, const N: usize> PartialEq> +impl, U: EntityEquivalent, const N: usize> PartialEq> for [T; N] { - fn eq(&self, other: &UniqueEntitySlice) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { self.eq(&other.0) } } -impl, U: TrustedEntityBorrow> PartialEq> - for Vec +impl, U: EntityEquivalent> + PartialEq> for Vec { - fn eq(&self, other: 
&UniqueEntitySlice) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { self.eq(&other.0) } } -impl, U, const N: usize> PartialEq<[U; N]> - for &UniqueEntitySlice +impl, U, const N: usize> PartialEq<[U; N]> + for &UniqueEntityEquivalentSlice { fn eq(&self, other: &[U; N]) -> bool { self.0.eq(other) } } -impl, U, const N: usize> PartialEq<[U; N]> - for &mut UniqueEntitySlice +impl, U, const N: usize> PartialEq<[U; N]> + for &mut UniqueEntityEquivalentSlice { fn eq(&self, other: &[U; N]) -> bool { self.0.eq(other) } } -impl, U, const N: usize> PartialEq<[U; N]> - for UniqueEntitySlice +impl, U, const N: usize> PartialEq<[U; N]> + for UniqueEntityEquivalentSlice { fn eq(&self, other: &[U; N]) -> bool { self.0.eq(other) } } -impl, U> PartialEq> for &UniqueEntitySlice { +impl, U: EntityEquivalent, const N: usize> + PartialEq> for &UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for &mut UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.0) + } +} + +impl, U> PartialEq> for &UniqueEntityEquivalentSlice { fn eq(&self, other: &Vec) -> bool { self.0.eq(other) } } -impl, U> PartialEq> for &mut UniqueEntitySlice { +impl, U> PartialEq> + for &mut UniqueEntityEquivalentSlice +{ fn eq(&self, other: &Vec) -> bool { self.0.eq(other) } } -impl, U> PartialEq> for UniqueEntitySlice { +impl, U> PartialEq> for UniqueEntityEquivalentSlice { fn eq(&self, other: &Vec) -> bool { self.0.eq(other) } } -impl ToOwned for UniqueEntitySlice { - type Owned = UniqueEntityVec; +impl ToOwned for UniqueEntityEquivalentSlice { + type Owned = UniqueEntityEquivalentVec; fn to_owned(&self) -> Self::Owned { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntityVec::from_vec_unchecked(self.0.to_owned()) } + unsafe { UniqueEntityEquivalentVec::from_vec_unchecked(self.0.to_owned()) } } } -impl Index<(Bound, Bound)> for UniqueEntitySlice { +impl<'a, T: EntityEquivalent + Copy, const N: usize> TryFrom<&'a UniqueEntityEquivalentSlice> + for &'a UniqueEntityEquivalentArray +{ + type Error = TryFromSliceError; + + fn try_from(value: &'a UniqueEntityEquivalentSlice) -> Result { + <&[T; N]>::try_from(&value.0).map(|array| + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentArray::from_array_ref_unchecked(array) }) + } +} + +impl TryFrom<&UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentArray +{ + type Error = TryFromSliceError; + + fn try_from(value: &UniqueEntityEquivalentSlice) -> Result { + <&Self>::try_from(value).copied() + } +} + +impl TryFrom<&mut UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentArray +{ + type Error = TryFromSliceError; + + fn try_from(value: &mut UniqueEntityEquivalentSlice) -> Result { + ::try_from(&*value) + } +} + +impl Index<(Bound, Bound)> for UniqueEntityEquivalentSlice { type Output = Self; fn index(&self, key: (Bound, Bound)) -> &Self { // SAFETY: All elements in the original slice are unique. 
@@ -829,7 +1366,7 @@ impl Index<(Bound, Bound)> for UniqueEntit } } -impl Index> for UniqueEntitySlice { +impl Index> for UniqueEntityEquivalentSlice { type Output = Self; fn index(&self, key: Range) -> &Self { // SAFETY: All elements in the original slice are unique. @@ -837,7 +1374,7 @@ impl Index> for UniqueEntitySlice { } } -impl Index> for UniqueEntitySlice { +impl Index> for UniqueEntityEquivalentSlice { type Output = Self; fn index(&self, key: RangeFrom) -> &Self { // SAFETY: All elements in the original slice are unique. @@ -845,7 +1382,7 @@ impl Index> for UniqueEntitySlice { } } -impl Index for UniqueEntitySlice { +impl Index for UniqueEntityEquivalentSlice { type Output = Self; fn index(&self, key: RangeFull) -> &Self { // SAFETY: All elements in the original slice are unique. @@ -853,31 +1390,31 @@ impl Index for UniqueEntitySlice { } } -impl Index> for UniqueEntitySlice { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentSlice { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeInclusive) -> &Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked(self.0.index(key)) } } } -impl Index> for UniqueEntitySlice { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentSlice { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeTo) -> &Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked(self.0.index(key)) } } } -impl Index> for UniqueEntitySlice { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentSlice { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeToInclusive) -> &Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked(self.0.index(key)) } } } -impl Index for UniqueEntitySlice { +impl Index for UniqueEntityEquivalentSlice { type Output = T; fn index(&self, index: usize) -> &T { @@ -885,49 +1422,51 @@ impl Index for UniqueEntitySlice { } } -impl IndexMut<(Bound, Bound)> for UniqueEntitySlice { +impl IndexMut<(Bound, Bound)> + for UniqueEntityEquivalentSlice +{ fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntitySlice { +impl IndexMut> for UniqueEntityEquivalentSlice { fn index_mut(&mut self, key: Range) -> &mut Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntitySlice { +impl IndexMut> for UniqueEntityEquivalentSlice { fn index_mut(&mut self, key: RangeFrom) -> &mut Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut for UniqueEntitySlice { +impl IndexMut for UniqueEntityEquivalentSlice { fn index_mut(&mut self, key: RangeFull) -> &mut Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntitySlice { +impl IndexMut> for UniqueEntityEquivalentSlice { fn index_mut(&mut self, key: RangeInclusive) -> &mut Self { // SAFETY: All elements in the original slice are unique. 
unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntitySlice { +impl IndexMut> for UniqueEntityEquivalentSlice { fn index_mut(&mut self, key: RangeTo) -> &mut Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntitySlice { +impl IndexMut> for UniqueEntityEquivalentSlice { fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self { // SAFETY: All elements in the original slice are unique. unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } @@ -936,40 +1475,421 @@ impl IndexMut> for UniqueEntityS /// Immutable slice iterator. /// -/// This struct is created by [`iter`] method on [`UniqueEntitySlice`] and -/// the [`IntoIterator`] impls on it and [`UniqueEntityVec`]. +/// This struct is created by [`iter`] method on [`UniqueEntityEquivalentSlice`] and +/// the [`IntoIterator`] impls on it and [`UniqueEntityEquivalentVec`]. /// -/// [`iter`]: `UniqueEntitySlice::iter` -/// [`into_iter`]: UniqueEntitySlice::into_iter +/// [`iter`]: `UniqueEntityEquivalentSlice::iter` pub type Iter<'a, T> = UniqueEntityIter>; -impl<'a, T: TrustedEntityBorrow> UniqueEntityIter> { +impl<'a, T: EntityEquivalent> UniqueEntityIter> { /// Views the underlying data as a subslice of the original data. /// /// Equivalent to [`slice::Iter::as_slice`]. - pub fn as_slice(&self) -> &'a UniqueEntitySlice { + pub fn as_slice(&self) -> &'a UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.as_inner().as_slice()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } } } /// Mutable slice iterator. pub type IterMut<'a, T> = UniqueEntityIter>; -impl<'a, T: TrustedEntityBorrow> UniqueEntityIter> { +impl<'a, T: EntityEquivalent> UniqueEntityIter> { /// Views the underlying data as a mutable subslice of the original data. /// /// Equivalent to [`slice::IterMut::into_slice`]. - pub fn into_slice(self) -> &'a mut UniqueEntitySlice { + pub fn into_slice(self) -> &'a mut UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.into_inner().into_slice()) } + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.into_inner().into_slice()) + } } /// Views the underlying data as a subslice of the original data. /// /// Equivalent to [`slice::IterMut::as_slice`]. - pub fn as_slice(&self) -> &UniqueEntitySlice { + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.as_inner().as_slice()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } } } + +/// An iterator that yields `&UniqueEntityEquivalentSlice`. Note that an entity may appear +/// in multiple slices, depending on the wrapped iterator. +#[derive(Debug)] +pub struct UniqueEntityEquivalentSliceIter< + 'a, + T: EntityEquivalent + 'a, + I: Iterator, +> { + pub(crate) iter: I, +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> + UniqueEntityEquivalentSliceIter<'a, T, I> +{ + /// Constructs a [`UniqueEntityEquivalentSliceIter`] from a slice iterator unsafely. + /// + /// # Safety + /// + /// All elements in each of the slices must be unique. 
+ pub unsafe fn from_slice_iterator_unchecked(iter: I) -> Self { + Self { iter } + } + + /// Returns the inner `I`. + pub fn into_inner(self) -> I { + self.iter + } + + /// Returns a reference to the inner `I`. + pub fn as_inner(&self) -> &I { + &self.iter + } + + /// Returns a mutable reference to the inner `I`. + /// + /// # Safety + /// + /// `self` must always contain an iterator that yields unique elements, + /// even while this reference is live. + pub unsafe fn as_mut_inner(&mut self) -> &mut I { + &mut self.iter + } +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> Iterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ + type Item = &'a UniqueEntityEquivalentSlice; + + fn next(&mut self) -> Option { + self.iter.next().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice) }) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, T: EntityEquivalent + 'a, I: ExactSizeIterator> ExactSizeIterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: DoubleEndedIterator> DoubleEndedIterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice) }) + } +} + +impl<'a, T: EntityEquivalent + 'a, I: FusedIterator> FusedIterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator + AsRef<[&'a [T]]>> + AsRef<[&'a UniqueEntityEquivalentSlice]> for UniqueEntityEquivalentSliceIter<'a, T, I> +{ + fn as_ref(&self) -> &[&'a UniqueEntityEquivalentSlice] { + // SAFETY: + unsafe { cast_slice_of_unique_entity_slice(self.iter.as_ref()) } + } +} + +/// An iterator over overlapping subslices of length `size`. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::windows`]. +pub type Windows<'a, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::Windows<'a, T>>; + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks`]. +pub type Chunks<'a, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::Chunks<'a, T>>; + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks_exact`]. +pub type ChunksExact<'a, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::ChunksExact<'a, T>>; + +impl<'a, T: EntityEquivalent> UniqueEntityEquivalentSliceIter<'a, T, slice::ChunksExact<'a, T>> { + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::ChunksExact::remainder`]. + pub fn remainder(&self) -> &'a UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.iter.remainder()) } + } +} + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks`]. 
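The adapter aliases in this hunk (`Windows`, `Chunks`, `ChunksExact`, and the rest below) wrap the matching `core::slice` iterators so that every sub-slice they yield is itself a unique-entity slice. A minimal sketch of how that reads at a call site, assuming the `windows`/`chunks` methods mirror their `std` counterparts as these aliases suggest and that the types are re-exported from `bevy_ecs::entity`:

```rust
use bevy_ecs::entity::UniqueEntityVec;
use bevy_ecs::world::World;

fn iterate_unique_windows() {
    let mut world = World::new();
    // Collecting through `FromIterator` validates uniqueness with `Eq`.
    let entities: UniqueEntityVec = (0..3).map(|_| world.spawn_empty().id()).collect();

    // Each window/chunk is itself a unique-entity slice, so the uniqueness
    // invariant is preserved for every sub-slice that is handed out.
    assert_eq!(entities.windows(2).count(), 2); // [e0, e1], [e1, e2]
    assert_eq!(entities.chunks(2).count(), 2); // [e0, e1], [e2]
}
```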
+pub type RChunks<'a, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::RChunks<'a, T>>; + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks_exact`]. +pub type RChunksExact<'a, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::RChunksExact<'a, T>>; + +impl<'a, T: EntityEquivalent> UniqueEntityEquivalentSliceIter<'a, T, slice::RChunksExact<'a, T>> { + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::RChunksExact::remainder`]. + pub fn remainder(&self) -> &'a UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.iter.remainder()) } + } +} + +/// An iterator over slice in (non-overlapping) chunks separated by a predicate. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunk_by`]. +pub type ChunkBy<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::ChunkBy<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split`]. +pub type Split<'a, P, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::Split<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split_inclusive`]. +pub type SplitInclusive<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::SplitInclusive<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function, starting from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplit`]. +pub type RSplit<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::RSplit<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function, limited to a given number of splits. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::splitn`]. +pub type SplitN<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::SplitN<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a +/// predicate function, limited to a given number of splits, starting +/// from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplitn`]. +pub type RSplitN<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::RSplitN<'a, T, P>>; + +/// An iterator that yields `&mut UniqueEntityEquivalentSlice`. Note that an entity may appear +/// in multiple slices, depending on the wrapped iterator. +#[derive(Debug)] +pub struct UniqueEntityEquivalentSliceIterMut< + 'a, + T: EntityEquivalent + 'a, + I: Iterator, +> { + pub(crate) iter: I, +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> + UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + /// Constructs a [`UniqueEntityEquivalentSliceIterMut`] from a mutable slice iterator unsafely. + /// + /// # Safety + /// + /// All elements in each of the slices must be unique. + pub unsafe fn from_mut_slice_iterator_unchecked(iter: I) -> Self { + Self { iter } + } + + /// Returns the inner `I`. + pub fn into_inner(self) -> I { + self.iter + } + + /// Returns a reference to the inner `I`. 
+ pub fn as_inner(&self) -> &I { + &self.iter + } + + /// Returns a mutable reference to the inner `I`. + /// + /// # Safety + /// + /// `self` must always contain an iterator that yields unique elements, + /// even while this reference is live. + pub unsafe fn as_mut_inner(&mut self) -> &mut I { + &mut self.iter + } +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> Iterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + type Item = &'a mut UniqueEntityEquivalentSlice; + + fn next(&mut self) -> Option { + self.iter.next().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice) }) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, T: EntityEquivalent + 'a, I: ExactSizeIterator> ExactSizeIterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: DoubleEndedIterator> DoubleEndedIterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice) }) + } +} + +impl<'a, T: EntityEquivalent + 'a, I: FusedIterator> FusedIterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator + AsRef<[&'a [T]]>> + AsRef<[&'a UniqueEntityEquivalentSlice]> for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + fn as_ref(&self) -> &[&'a UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { cast_slice_of_unique_entity_slice(self.iter.as_ref()) } + } +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator + AsMut<[&'a mut [T]]>> + AsMut<[&'a mut UniqueEntityEquivalentSlice]> + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + fn as_mut(&mut self) -> &mut [&'a mut UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { cast_slice_of_mut_unique_entity_slice_mut(self.iter.as_mut()) } + } +} + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks_mut`]. +pub type ChunksMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunksMut<'a, T>>; + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks_exact_mut`]. +pub type ChunksExactMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunksExactMut<'a, T>>; + +impl<'a, T: EntityEquivalent> + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunksExactMut<'a, T>> +{ + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::ChunksExactMut::into_remainder`]. + pub fn into_remainder(self) -> &'a mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.iter.into_remainder()) } + } +} + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the end of the slice. 
+/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks_mut`]. +pub type RChunksMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RChunksMut<'a, T>>; + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks_exact_mut`]. +pub type RChunksExactMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RChunksExactMut<'a, T>>; + +impl<'a, T: EntityEquivalent> + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RChunksExactMut<'a, T>> +{ + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::RChunksExactMut::into_remainder`]. + pub fn into_remainder(self) -> &'a mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.iter.into_remainder()) } + } +} + +/// An iterator over slice in (non-overlapping) mutable chunks separated +/// by a predicate. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunk_by_mut`]. +pub type ChunkByMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunkByMut<'a, T, P>>; + +/// An iterator over the mutable subslices of the vector which are separated +/// by elements that match `pred`. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split_mut`]. +pub type SplitMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::SplitMut<'a, T, P>>; + +/// An iterator over the mutable subslices of the vector which are separated +/// by elements that match `pred`. Unlike `SplitMut`, it contains the matched +/// parts in the ends of the subslices. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split_inclusive_mut`]. +pub type SplitInclusiveMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::SplitInclusiveMut<'a, T, P>>; + +/// An iterator over the subslices of the vector which are separated +/// by elements that match `pred`, starting from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplit_mut`]. +pub type RSplitMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RSplitMut<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function, limited to a given number of splits. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::splitn_mut`]. +pub type SplitNMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::SplitNMut<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a +/// predicate function, limited to a given number of splits, starting +/// from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplitn_mut`]. +pub type RSplitNMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RSplitNMut<'a, T, P>>; diff --git a/crates/bevy_ecs/src/entity/unique_vec.rs b/crates/bevy_ecs/src/entity/unique_vec.rs index 9ea1f9c7a6..30f9984e70 100644 --- a/crates/bevy_ecs/src/entity/unique_vec.rs +++ b/crates/bevy_ecs/src/entity/unique_vec.rs @@ -1,3 +1,5 @@ +//! A wrapper around entity [`Vec`]s with a uniqueness invariant. 
+ use core::{ borrow::{Borrow, BorrowMut}, mem::MaybeUninit, @@ -12,13 +14,15 @@ use alloc::{ boxed::Box, collections::{BTreeSet, BinaryHeap, TryReserveError, VecDeque}, rc::Rc, - sync::Arc, vec::{self, Vec}, }; +use bevy_platform::sync::Arc; + use super::{ - unique_slice, EntitySet, FromEntitySetIterator, TrustedEntityBorrow, UniqueEntityIter, - UniqueEntitySlice, + unique_slice::{self, UniqueEntityEquivalentSlice}, + Entity, EntityEquivalent, EntitySet, FromEntitySetIterator, UniqueEntityEquivalentArray, + UniqueEntityIter, }; /// A `Vec` that contains only unique entities. @@ -27,29 +31,36 @@ use super::{ /// This is always true when less than 2 entities are present. /// /// This type is best obtained by its `FromEntitySetIterator` impl, via either -/// `EntityIterator::collect_set` or `UniqueEntityVec::from_entity_iter`. +/// `EntityIterator::collect_set` or `UniqueEntityEquivalentVec::from_entity_iter`. /// /// While this type can be constructed via `Iterator::collect`, doing so is inefficient, /// and not recommended. +/// +/// When `T` is [`Entity`], use the [`UniqueEntityVec`] alias. #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct UniqueEntityVec(Vec); +pub struct UniqueEntityEquivalentVec(Vec); -impl UniqueEntityVec { - /// Constructs a new, empty `UniqueEntityVec`. +/// A `Vec` that contains only unique [`Entity`]. +/// +/// This is the default case of a [`UniqueEntityEquivalentVec`]. +pub type UniqueEntityVec = UniqueEntityEquivalentVec; + +impl UniqueEntityEquivalentVec { + /// Constructs a new, empty `UniqueEntityEquivalentVec`. /// /// Equivalent to [`Vec::new`]. pub const fn new() -> Self { Self(Vec::new()) } - /// Constructs a new, empty `UniqueEntityVec` with at least the specified capacity. + /// Constructs a new, empty `UniqueEntityEquivalentVec` with at least the specified capacity. /// /// Equivalent to [`Vec::with_capacity`] pub fn with_capacity(capacity: usize) -> Self { Self(Vec::with_capacity(capacity)) } - /// Creates a `UniqueEntityVec` directly from a pointer, a length, and a capacity. + /// Creates a `UniqueEntityEquivalentVec` directly from a pointer, a length, and a capacity. /// /// Equivalent to [`Vec::from_raw_parts`]. /// @@ -62,7 +73,7 @@ impl UniqueEntityVec { Self(unsafe { Vec::from_raw_parts(ptr, length, capacity) }) } - /// Constructs a `UniqueEntityVec` from a [`Vec`] unsafely. + /// Constructs a `UniqueEntityEquivalentVec` from a [`Vec`] unsafely. /// /// # Safety /// @@ -108,7 +119,7 @@ impl UniqueEntityVec { } /// Reserves the minimum capacity for at least `additional` more elements to - /// be inserted in the given `UniqueEntityVec`. + /// be inserted in the given `UniqueEntityEquivalentVec`. /// /// Equivalent to [`Vec::reserve_exact`]. pub fn reserve_exact(&mut self, additional: usize) { @@ -145,19 +156,21 @@ impl UniqueEntityVec { self.0.shrink_to(min_capacity); } - /// Converts the vector into `Box>`. - pub fn into_boxed_slice(self) -> Box> { - // SAFETY: UniqueEntitySlice is a transparent wrapper around [T]. - unsafe { UniqueEntitySlice::from_boxed_slice_unchecked(self.0.into_boxed_slice()) } + /// Converts the vector into `Box>`. + pub fn into_boxed_slice(self) -> Box> { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { + UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(self.0.into_boxed_slice()) + } } /// Extracts a slice containing the entire vector. 
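A minimal sketch of the construction paths described in the doc comments above, assuming the `UniqueEntityVec` alias and the `FromIterator`/`as_slice` APIs shown elsewhere in this diff are re-exported from `bevy_ecs::entity`:

```rust
use bevy_ecs::entity::UniqueEntityVec;
use bevy_ecs::world::World;

fn build_unique_entity_vec() {
    let mut world = World::new();
    let a = world.spawn_empty().id();
    let b = world.spawn_empty().id();

    // `collect` goes through the `FromIterator` impl, which checks uniqueness
    // with `Eq` (O(n^2)); fine for tiny inputs, but the docs recommend the
    // `FromEntitySetIterator` path when the input is already known to be unique.
    let unique: UniqueEntityVec = [a, b].into_iter().collect();

    // Borrow it as a unique slice without giving up the invariant.
    let slice = unique.as_slice();
    assert_eq!(slice.iter().count(), 2);
}
```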
- pub fn as_slice(&self) -> &UniqueEntitySlice { + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { self } /// Extracts a mutable slice of the entire vector. - pub fn as_mut_slice(&mut self) -> &mut UniqueEntitySlice { + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { self } @@ -297,7 +310,7 @@ impl UniqueEntityVec { /// # Safety /// /// `other` must contain no elements that equal any element in `self`. - pub unsafe fn append(&mut self, other: &mut UniqueEntityVec) { + pub unsafe fn append(&mut self, other: &mut UniqueEntityEquivalentVec) { self.0.append(&mut other.0); } @@ -364,10 +377,10 @@ impl UniqueEntityVec { self.0.resize_with(new_len, f); } - /// Consumes and leaks the Vec, returning a mutable reference to the contents, `&'a mut UniqueEntitySlice`. - pub fn leak<'a>(self) -> &'a mut UniqueEntitySlice { + /// Consumes and leaks the Vec, returning a mutable reference to the contents, `&'a mut UniqueEntityEquivalentSlice`. + pub fn leak<'a>(self) -> &'a mut UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.leak()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.leak()) } } /// Returns the remaining spare capacity of the vector as a slice of @@ -401,31 +414,31 @@ impl UniqueEntityVec { } } -impl Default for UniqueEntityVec { +impl Default for UniqueEntityEquivalentVec { fn default() -> Self { Self(Vec::default()) } } -impl Deref for UniqueEntityVec { - type Target = UniqueEntitySlice; +impl Deref for UniqueEntityEquivalentVec { + type Target = UniqueEntityEquivalentSlice; fn deref(&self) -> &Self::Target { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(&self.0) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(&self.0) } } } -impl DerefMut for UniqueEntityVec { +impl DerefMut for UniqueEntityEquivalentVec { fn deref_mut(&mut self) -> &mut Self::Target { // SAFETY: All elements in the original slice are unique. 
- unsafe { UniqueEntitySlice::from_slice_unchecked_mut(&mut self.0) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(&mut self.0) } } } -impl<'a, T: TrustedEntityBorrow> IntoIterator for &'a UniqueEntityVec +impl<'a, T: EntityEquivalent> IntoIterator for &'a UniqueEntityEquivalentVec where - &'a T: TrustedEntityBorrow, + &'a T: EntityEquivalent, { type Item = &'a T; @@ -437,7 +450,7 @@ where } } -impl IntoIterator for UniqueEntityVec { +impl IntoIterator for UniqueEntityEquivalentVec { type Item = T; type IntoIter = IntoIter; @@ -448,328 +461,417 @@ impl IntoIterator for UniqueEntityVec { } } -impl AsMut for UniqueEntityVec { - fn as_mut(&mut self) -> &mut UniqueEntityVec { +impl AsMut for UniqueEntityEquivalentVec { + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentVec { self } } -impl AsMut> for UniqueEntityVec { - fn as_mut(&mut self) -> &mut UniqueEntitySlice { +impl AsMut> for UniqueEntityEquivalentVec { + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { self } } -impl AsRef for UniqueEntityVec { +impl AsRef for UniqueEntityEquivalentVec { fn as_ref(&self) -> &Self { self } } -impl AsRef> for UniqueEntityVec { +impl AsRef> for UniqueEntityEquivalentVec { fn as_ref(&self) -> &Vec { &self.0 } } -impl Borrow> for UniqueEntityVec { +impl Borrow> for UniqueEntityEquivalentVec { fn borrow(&self) -> &Vec { &self.0 } } -impl AsRef<[T]> for UniqueEntityVec { +impl AsRef<[T]> for UniqueEntityEquivalentVec { fn as_ref(&self) -> &[T] { &self.0 } } -impl AsRef> for UniqueEntityVec { - fn as_ref(&self) -> &UniqueEntitySlice { +impl AsRef> for UniqueEntityEquivalentVec { + fn as_ref(&self) -> &UniqueEntityEquivalentSlice { self } } -impl Borrow<[T]> for UniqueEntityVec { +impl Borrow<[T]> for UniqueEntityEquivalentVec { fn borrow(&self) -> &[T] { &self.0 } } -impl Borrow> for UniqueEntityVec { - fn borrow(&self) -> &UniqueEntitySlice { +impl Borrow> for UniqueEntityEquivalentVec { + fn borrow(&self) -> &UniqueEntityEquivalentSlice { self } } -impl BorrowMut> for UniqueEntityVec { - fn borrow_mut(&mut self) -> &mut UniqueEntitySlice { +impl BorrowMut> + for UniqueEntityEquivalentVec +{ + fn borrow_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { self } } -impl, U> PartialEq> for UniqueEntityVec { +impl, U> PartialEq> for UniqueEntityEquivalentVec { fn eq(&self, other: &Vec) -> bool { self.0.eq(other) } } -impl, U> PartialEq<&[U]> for UniqueEntityVec { +impl, U> PartialEq<&[U]> for UniqueEntityEquivalentVec { fn eq(&self, other: &&[U]) -> bool { self.0.eq(other) } } -impl, U: TrustedEntityBorrow> PartialEq<&UniqueEntitySlice> - for UniqueEntityVec +impl, U: EntityEquivalent> + PartialEq<&UniqueEntityEquivalentSlice> for UniqueEntityEquivalentVec { - fn eq(&self, other: &&UniqueEntitySlice) -> bool { + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { self.0.eq(other) } } -impl, U> PartialEq<&mut [U]> for UniqueEntityVec { +impl, U> PartialEq<&mut [U]> for UniqueEntityEquivalentVec { fn eq(&self, other: &&mut [U]) -> bool { self.0.eq(other) } } -impl, U: TrustedEntityBorrow> - PartialEq<&mut UniqueEntitySlice> for UniqueEntityVec +impl, U: EntityEquivalent> + PartialEq<&mut UniqueEntityEquivalentSlice> for UniqueEntityEquivalentVec { - fn eq(&self, other: &&mut UniqueEntitySlice) -> bool { + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { self.0.eq(other) } } -impl, U, const N: usize> PartialEq<&[U; N]> - for UniqueEntityVec +impl, U, const N: usize> PartialEq<&[U; N]> + for UniqueEntityEquivalentVec { fn eq(&self, other: &&[U; N]) -> bool 
{ self.0.eq(other) } } -impl, U, const N: usize> PartialEq<&mut [U; N]> - for UniqueEntityVec +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentArray> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.as_inner()) + } +} + +impl, U, const N: usize> PartialEq<&mut [U; N]> + for UniqueEntityEquivalentVec { fn eq(&self, other: &&mut [U; N]) -> bool { self.0.eq(&**other) } } -impl, U> PartialEq<[U]> for UniqueEntityVec { +impl, U: EntityEquivalent, const N: usize> + PartialEq<&mut UniqueEntityEquivalentArray> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentArray) -> bool { + self.0.eq(other.as_inner()) + } +} + +impl, U> PartialEq<[U]> for UniqueEntityEquivalentVec { fn eq(&self, other: &[U]) -> bool { self.0.eq(other) } } -impl, U: TrustedEntityBorrow> PartialEq> - for UniqueEntityVec +impl, U: EntityEquivalent> + PartialEq> for UniqueEntityEquivalentVec { - fn eq(&self, other: &UniqueEntitySlice) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { self.0.eq(&**other) } } -impl, U, const N: usize> PartialEq<[U; N]> - for UniqueEntityVec +impl, U, const N: usize> PartialEq<[U; N]> + for UniqueEntityEquivalentVec { fn eq(&self, other: &[U; N]) -> bool { self.0.eq(other) } } -impl, U: TrustedEntityBorrow> PartialEq> for Vec { - fn eq(&self, other: &UniqueEntityVec) -> bool { - self.eq(&other.0) - } -} - -impl, U: TrustedEntityBorrow> PartialEq> for &[T] { - fn eq(&self, other: &UniqueEntityVec) -> bool { - self.eq(&other.0) - } -} - -impl, U: TrustedEntityBorrow> PartialEq> for &mut [T] { - fn eq(&self, other: &UniqueEntityVec) -> bool { - self.eq(&other.0) - } -} - -impl, U: TrustedEntityBorrow> PartialEq> - for [T] +impl, U: EntityEquivalent, const N: usize> + PartialEq> for UniqueEntityEquivalentVec { - fn eq(&self, other: &UniqueEntityVec) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(other.as_inner()) + } +} + +impl, U: EntityEquivalent> PartialEq> for Vec { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { self.eq(&other.0) } } -impl + Clone, U: TrustedEntityBorrow> PartialEq> +impl, U: EntityEquivalent> PartialEq> for &[T] { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> PartialEq> for &mut [T] { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> + PartialEq> for [T] +{ + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl + Clone, U: EntityEquivalent> PartialEq> for Cow<'_, [T]> { - fn eq(&self, other: &UniqueEntityVec) -> bool { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { self.eq(&other.0) } } -impl, U: TrustedEntityBorrow> PartialEq> for VecDeque { - fn eq(&self, other: &UniqueEntityVec) -> bool { +impl, U: EntityEquivalent> PartialEq> for VecDeque { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { self.eq(&other.0) } } -impl From<&UniqueEntitySlice> for UniqueEntityVec { - fn from(value: &UniqueEntitySlice) -> Self { +impl From<&UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentVec +{ + fn from(value: &UniqueEntityEquivalentSlice) -> Self { value.to_vec() } } -impl From<&mut UniqueEntitySlice> for UniqueEntityVec { - fn from(value: &mut UniqueEntitySlice) -> Self { +impl From<&mut UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentVec +{ + fn from(value: &mut 
UniqueEntityEquivalentSlice) -> Self { value.to_vec() } } -impl From>> for UniqueEntityVec { - fn from(value: Box>) -> Self { +impl From>> + for UniqueEntityEquivalentVec +{ + fn from(value: Box>) -> Self { value.into_vec() } } -impl From>> for UniqueEntityVec +impl From>> + for UniqueEntityEquivalentVec where - UniqueEntitySlice: ToOwned>, + UniqueEntityEquivalentSlice: ToOwned>, { - fn from(value: Cow>) -> Self { + fn from(value: Cow>) -> Self { value.into_owned() } } -impl From<&[T; 1]> for UniqueEntityVec { +impl From<&[T; 1]> for UniqueEntityEquivalentVec { fn from(value: &[T; 1]) -> Self { Self(Vec::from(value)) } } -impl From<&[T; 0]> for UniqueEntityVec { +impl From<&[T; 0]> for UniqueEntityEquivalentVec { fn from(value: &[T; 0]) -> Self { Self(Vec::from(value)) } } -impl From<&mut [T; 1]> for UniqueEntityVec { +impl From<&mut [T; 1]> for UniqueEntityEquivalentVec { fn from(value: &mut [T; 1]) -> Self { Self(Vec::from(value)) } } -impl From<&mut [T; 0]> for UniqueEntityVec { +impl From<&mut [T; 0]> for UniqueEntityEquivalentVec { fn from(value: &mut [T; 0]) -> Self { Self(Vec::from(value)) } } -impl From<[T; 1]> for UniqueEntityVec { +impl From<[T; 1]> for UniqueEntityEquivalentVec { fn from(value: [T; 1]) -> Self { Self(Vec::from(value)) } } -impl From<[T; 0]> for UniqueEntityVec { +impl From<[T; 0]> for UniqueEntityEquivalentVec { fn from(value: [T; 0]) -> Self { Self(Vec::from(value)) } } -impl From> for Vec { - fn from(value: UniqueEntityVec) -> Self { +impl From<&UniqueEntityEquivalentArray> + for UniqueEntityEquivalentVec +{ + fn from(value: &UniqueEntityEquivalentArray) -> Self { + Self(Vec::from(value.as_inner().clone())) + } +} + +impl From<&mut UniqueEntityEquivalentArray> + for UniqueEntityEquivalentVec +{ + fn from(value: &mut UniqueEntityEquivalentArray) -> Self { + Self(Vec::from(value.as_inner().clone())) + } +} + +impl From> + for UniqueEntityEquivalentVec +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + Self(Vec::from(value.into_inner())) + } +} + +impl From> for Vec { + fn from(value: UniqueEntityEquivalentVec) -> Self { value.0 } } -impl<'a, T: TrustedEntityBorrow + Clone> From> for Cow<'a, [T]> { - fn from(value: UniqueEntityVec) -> Self { +impl<'a, T: EntityEquivalent + Clone> From> for Cow<'a, [T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { Cow::from(value.0) } } -impl<'a, T: TrustedEntityBorrow + Clone> From> - for Cow<'a, UniqueEntitySlice> +impl<'a, T: EntityEquivalent + Clone> From> + for Cow<'a, UniqueEntityEquivalentSlice> { - fn from(value: UniqueEntityVec) -> Self { + fn from(value: UniqueEntityEquivalentVec) -> Self { Cow::Owned(value) } } -impl From> for Arc<[T]> { - fn from(value: UniqueEntityVec) -> Self { +impl From> for Arc<[T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { Arc::from(value.0) } } -impl From> for Arc> { - fn from(value: UniqueEntityVec) -> Self { +impl From> + for Arc> +{ + fn from(value: UniqueEntityEquivalentVec) -> Self { // SAFETY: All elements in the original slice are unique. 
- unsafe { UniqueEntitySlice::from_arc_slice_unchecked(Arc::from(value.0)) } + unsafe { UniqueEntityEquivalentSlice::from_arc_slice_unchecked(Arc::from(value.0)) } } } -impl From> for BinaryHeap { - fn from(value: UniqueEntityVec) -> Self { +impl From> for BinaryHeap { + fn from(value: UniqueEntityEquivalentVec) -> Self { BinaryHeap::from(value.0) } } -impl From> for Box<[T]> { - fn from(value: UniqueEntityVec) -> Self { +impl From> for Box<[T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { Box::from(value.0) } } -impl From> for Rc<[T]> { - fn from(value: UniqueEntityVec) -> Self { +impl From> for Rc<[T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { Rc::from(value.0) } } -impl From> for Rc> { - fn from(value: UniqueEntityVec) -> Self { +impl From> + for Rc> +{ + fn from(value: UniqueEntityEquivalentVec) -> Self { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_rc_slice_unchecked(Rc::from(value.0)) } + unsafe { UniqueEntityEquivalentSlice::from_rc_slice_unchecked(Rc::from(value.0)) } } } -impl From> for VecDeque { - fn from(value: UniqueEntityVec) -> Self { +impl From> for VecDeque { + fn from(value: UniqueEntityEquivalentVec) -> Self { VecDeque::from(value.0) } } -impl TryFrom> for Box<[T; N]> { - type Error = UniqueEntityVec; +impl TryFrom> for Box<[T; N]> { + type Error = UniqueEntityEquivalentVec; - fn try_from(value: UniqueEntityVec) -> Result { - Box::try_from(value.0).map_err(UniqueEntityVec) + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + Box::try_from(value.0).map_err(UniqueEntityEquivalentVec) } } -impl TryFrom> for [T; N] { - type Error = UniqueEntityVec; +impl TryFrom> + for Box> +{ + type Error = UniqueEntityEquivalentVec; - fn try_from(value: UniqueEntityVec) -> Result { - <[T; N] as TryFrom>>::try_from(value.0).map_err(UniqueEntityVec) + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + Box::try_from(value.0) + .map(|v| + // SAFETY: All elements in the original Vec are unique. + unsafe { UniqueEntityEquivalentArray::from_boxed_array_unchecked(v) }) + .map_err(UniqueEntityEquivalentVec) } } -impl From> for UniqueEntityVec { +impl TryFrom> for [T; N] { + type Error = UniqueEntityEquivalentVec; + + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + <[T; N] as TryFrom>>::try_from(value.0).map_err(UniqueEntityEquivalentVec) + } +} + +impl TryFrom> + for UniqueEntityEquivalentArray +{ + type Error = UniqueEntityEquivalentVec; + + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + <[T; N] as TryFrom>>::try_from(value.0) + .map(|v| + // SAFETY: All elements in the original Vec are unique. + unsafe { UniqueEntityEquivalentArray::from_array_unchecked(v) }) + .map_err(UniqueEntityEquivalentVec) + } +} + +impl From> for UniqueEntityEquivalentVec { fn from(value: BTreeSet) -> Self { Self(value.into_iter().collect::>()) } } -impl FromIterator for UniqueEntityVec { +impl FromIterator for UniqueEntityEquivalentVec { /// This impl only uses `Eq` to validate uniqueness, resulting in O(n^2) complexity. /// It can make sense for very low N, or if `T` implements neither `Ord` nor `Hash`. /// When possible, use `FromEntitySetIterator::from_entity_iter` instead. @@ -788,14 +890,14 @@ impl FromIterator for UniqueEntityVec { } } -impl FromEntitySetIterator for UniqueEntityVec { +impl FromEntitySetIterator for UniqueEntityEquivalentVec { fn from_entity_set_iter>(iter: I) -> Self { // SAFETY: `iter` is an `EntitySet`. 
unsafe { Self::from_vec_unchecked(Vec::from_iter(iter)) } } } -impl Extend for UniqueEntityVec { +impl Extend for UniqueEntityEquivalentVec { /// Use with caution, because this impl only uses `Eq` to validate uniqueness, /// resulting in O(n^2) complexity. /// It can make sense for very low N, or if `T` implements neither `Ord` nor `Hash`. @@ -809,7 +911,7 @@ impl Extend for UniqueEntityVec { let reserve = if self.is_empty() { iter.size_hint().0 } else { - (iter.size_hint().0 + 1) / 2 + iter.size_hint().0.div_ceil(2) }; self.reserve(reserve); // Internal iteration (fold/for_each) is known to result in better code generation @@ -822,7 +924,7 @@ impl Extend for UniqueEntityVec { } } -impl<'a, T: TrustedEntityBorrow + Copy + 'a> Extend<&'a T> for UniqueEntityVec { +impl<'a, T: EntityEquivalent + Copy + 'a> Extend<&'a T> for UniqueEntityEquivalentVec { /// Use with caution, because this impl only uses `Eq` to validate uniqueness, /// resulting in O(n^2) complexity. /// It can make sense for very low N, or if `T` implements neither `Ord` nor `Hash`. @@ -836,7 +938,7 @@ impl<'a, T: TrustedEntityBorrow + Copy + 'a> Extend<&'a T> for UniqueEntityVec Extend<&'a T> for UniqueEntityVec Index<(Bound, Bound)> for UniqueEntityVec { - type Output = UniqueEntitySlice; +impl Index<(Bound, Bound)> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: (Bound, Bound)) -> &Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.0.index(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } } } -impl Index> for UniqueEntityVec { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: Range) -> &Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.0.index(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } } } -impl Index> for UniqueEntityVec { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeFrom) -> &Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.0.index(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } } } -impl Index for UniqueEntityVec { - type Output = UniqueEntitySlice; +impl Index for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeFull) -> &Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.0.index(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } } } -impl Index> for UniqueEntityVec { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeInclusive) -> &Self::Output { // SAFETY: All elements in the original slice are unique. 
- unsafe { UniqueEntitySlice::from_slice_unchecked(self.0.index(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } } } -impl Index> for UniqueEntityVec { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeTo) -> &Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.0.index(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } } } -impl Index> for UniqueEntityVec { - type Output = UniqueEntitySlice; +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; fn index(&self, key: RangeToInclusive) -> &Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.0.index(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } } } -impl Index for UniqueEntityVec { +impl Index for UniqueEntityEquivalentVec { type Output = T; fn index(&self, key: usize) -> &T { self.0.index(key) } } -impl IndexMut<(Bound, Bound)> for UniqueEntityVec { +impl IndexMut<(Bound, Bound)> for UniqueEntityEquivalentVec { fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntityVec { +impl IndexMut> for UniqueEntityEquivalentVec { fn index_mut(&mut self, key: Range) -> &mut Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntityVec { +impl IndexMut> for UniqueEntityEquivalentVec { fn index_mut(&mut self, key: RangeFrom) -> &mut Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut for UniqueEntityVec { +impl IndexMut for UniqueEntityEquivalentVec { fn index_mut(&mut self, key: RangeFull) -> &mut Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntityVec { +impl IndexMut> for UniqueEntityEquivalentVec { fn index_mut(&mut self, key: RangeInclusive) -> &mut Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntityVec { +impl IndexMut> for UniqueEntityEquivalentVec { fn index_mut(&mut self, key: RangeTo) -> &mut Self::Output { // SAFETY: All elements in the original slice are unique. 
- unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } } } -impl IndexMut> for UniqueEntityVec { +impl IndexMut> for UniqueEntityEquivalentVec { fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self::Output { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } } } /// An iterator that moves out of a vector. /// /// This `struct` is created by the [`IntoIterator::into_iter`] trait -/// method on [`UniqueEntityVec`]. -pub type IntoIter = UniqueEntityIter>; +/// method on [`UniqueEntityEquivalentVec`]. +pub type IntoIter = UniqueEntityIter>; -impl UniqueEntityIter> { +impl UniqueEntityIter> { /// Returns the remaining items of this iterator as a slice. /// /// Equivalent to [`vec::IntoIter::as_slice`]. - pub fn as_slice(&self) -> &UniqueEntitySlice { + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.as_inner().as_slice()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } } /// Returns the remaining items of this iterator as a mutable slice. /// /// Equivalent to [`vec::IntoIter::as_mut_slice`]. - pub fn as_mut_slice(&mut self) -> &mut UniqueEntitySlice { + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked_mut(self.as_mut_inner().as_mut_slice()) } + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut( + self.as_mut_inner().as_mut_slice(), + ) + } } } -/// A draining iterator for [`UniqueEntityVec`]. +/// A draining iterator for [`UniqueEntityEquivalentVec`]. /// -/// This struct is created by [`UniqueEntityVec::drain`]. +/// This struct is created by [`UniqueEntityEquivalentVec::drain`]. /// See its documentation for more. -pub type Drain<'a, T> = UniqueEntityIter>; +pub type Drain<'a, T = Entity> = UniqueEntityIter>; -impl<'a, T: TrustedEntityBorrow> UniqueEntityIter> { +impl<'a, T: EntityEquivalent> UniqueEntityIter> { /// Returns the remaining items of this iterator as a slice. /// /// Equivalent to [`vec::Drain::as_slice`]. - pub fn as_slice(&self) -> &UniqueEntitySlice { + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { // SAFETY: All elements in the original slice are unique. - unsafe { UniqueEntitySlice::from_slice_unchecked(self.as_inner().as_slice()) } + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } } } -/// A splicing iterator for [`UniqueEntityVec`]. +/// A splicing iterator for [`UniqueEntityEquivalentVec`]. /// -/// This struct is created by [`UniqueEntityVec::splice`]. +/// This struct is created by [`UniqueEntityEquivalentVec::splice`]. /// See its documentation for more. pub type Splice<'a, I> = UniqueEntityIter>; diff --git a/crates/bevy_ecs/src/entity/visit_entities.rs b/crates/bevy_ecs/src/entity/visit_entities.rs deleted file mode 100644 index 734c96e113..0000000000 --- a/crates/bevy_ecs/src/entity/visit_entities.rs +++ /dev/null @@ -1,149 +0,0 @@ -pub use bevy_ecs_macros::{VisitEntities, VisitEntitiesMut}; - -use crate::entity::Entity; - -/// Apply an operation to all entities in a container. 
-/// -/// This is implemented by default for types that implement [`IntoIterator`]. -/// -/// It may be useful to implement directly for types that can't produce an -/// iterator for lifetime reasons, such as those involving internal mutexes. -pub trait VisitEntities { - /// Apply an operation to all contained entities. - fn visit_entities(&self, f: F); -} - -impl VisitEntities for T -where - for<'a> &'a T: IntoIterator, -{ - fn visit_entities(&self, f: F) { - self.into_iter().copied().for_each(f); - } -} - -impl VisitEntities for Entity { - fn visit_entities(&self, mut f: F) { - f(*self); - } -} - -/// Apply an operation to mutable references to all entities in a container. -/// -/// This is implemented by default for types that implement [`IntoIterator`]. -/// -/// It may be useful to implement directly for types that can't produce an -/// iterator for lifetime reasons, such as those involving internal mutexes. -pub trait VisitEntitiesMut: VisitEntities { - /// Apply an operation to mutable references to all contained entities. - fn visit_entities_mut(&mut self, f: F); -} - -impl VisitEntitiesMut for T -where - for<'a> &'a mut T: IntoIterator, -{ - fn visit_entities_mut(&mut self, f: F) { - self.into_iter().for_each(f); - } -} - -impl VisitEntitiesMut for Entity { - fn visit_entities_mut(&mut self, mut f: F) { - f(self); - } -} - -#[cfg(test)] -mod tests { - use crate::{ - entity::{hash_map::EntityHashMap, MapEntities, SceneEntityMapper}, - world::World, - }; - use alloc::{string::String, vec, vec::Vec}; - use bevy_platform_support::collections::HashSet; - - use super::*; - - #[derive(VisitEntities, Debug, PartialEq)] - struct Foo { - ordered: Vec, - unordered: HashSet, - single: Entity, - #[visit_entities(ignore)] - not_an_entity: String, - } - - // Need a manual impl since VisitEntitiesMut isn't implemented for `HashSet`. - // We don't expect users to actually do this - it's only for test purposes - // to prove out the automatic `MapEntities` impl we get with `VisitEntitiesMut`. - impl VisitEntitiesMut for Foo { - fn visit_entities_mut(&mut self, mut f: F) { - self.ordered.visit_entities_mut(&mut f); - self.unordered = self - .unordered - .drain() - .map(|mut entity| { - f(&mut entity); - entity - }) - .collect(); - f(&mut self.single); - } - } - - #[test] - fn visit_entities() { - let mut world = World::new(); - let entities = world.entities(); - let mut foo = Foo { - ordered: vec![entities.reserve_entity(), entities.reserve_entity()], - unordered: [ - entities.reserve_entity(), - entities.reserve_entity(), - entities.reserve_entity(), - ] - .into_iter() - .collect(), - single: entities.reserve_entity(), - not_an_entity: "Bar".into(), - }; - - let mut entity_map = EntityHashMap::::default(); - let mut remapped = Foo { - ordered: vec![], - unordered: HashSet::default(), - single: Entity::PLACEHOLDER, - not_an_entity: foo.not_an_entity.clone(), - }; - - // Note: this assumes that the VisitEntities derive is field-ordered, - // which isn't explicitly stated/guaranteed. - // If that changes, this test will fail, but that might be OK if - // we're intentionally breaking that assumption. 
-        let mut i = 0;
-        foo.visit_entities(|entity| {
-            let new_entity = entities.reserve_entity();
-            if i < foo.ordered.len() {
-                assert_eq!(entity, foo.ordered[i]);
-                remapped.ordered.push(new_entity);
-            } else if i < foo.ordered.len() + foo.unordered.len() {
-                assert!(foo.unordered.contains(&entity));
-                remapped.unordered.insert(new_entity);
-            } else {
-                assert_eq!(entity, foo.single);
-                remapped.single = new_entity;
-            }
-
-            entity_map.insert(entity, new_entity);
-
-            i += 1;
-        });
-
-        SceneEntityMapper::world_scope(&mut entity_map, &mut world, |_, mapper| {
-            foo.map_entities(mapper);
-        });
-
-        assert_eq!(foo, remapped);
-    }
-}
diff --git a/crates/bevy_ecs/src/entity_disabling.rs b/crates/bevy_ecs/src/entity_disabling.rs
index 77326cea3f..5d62011174 100644
--- a/crates/bevy_ecs/src/entity_disabling.rs
+++ b/crates/bevy_ecs/src/entity_disabling.rs
@@ -1,14 +1,55 @@
-//! Types for entity disabling.
-//!
 //! Disabled entities do not show up in queries unless the query explicitly mentions them.
 //!
-//! If for example we have `Disabled` as an entity disabling component, when you add `Disabled`
-//! to an entity, the entity will only be visible to queries with a filter like
-//! [`With`]`<Disabled>` or query data like [`Has`]`<Disabled>`.
+//! Entities which are disabled in this way are not removed from the [`World`],
+//! and their relationships remain intact.
+//! In many cases, you may want to disable entire trees of entities at once,
+//! using [`EntityCommands::insert_recursive`](crate::prelude::EntityCommands::insert_recursive).
 //!
-//! ### Note
+//! While Bevy ships with a built-in [`Disabled`] component, you can also create your own
+//! disabling components, which will operate in the same way but can have distinct semantics.
 //!
-//! Currently only queries for which the cache is built after enabling a filter will have entities
+//! ```
+//! use bevy_ecs::prelude::*;
+//!
+//! // Our custom disabling component!
+//! #[derive(Component, Clone)]
+//! struct Prefab;
+//!
+//! #[derive(Component)]
+//! struct A;
+//!
+//! let mut world = World::new();
+//! world.register_disabling_component::<Prefab>();
+//! world.spawn((A, Prefab));
+//! world.spawn((A,));
+//! world.spawn((A,));
+//!
+//! let mut normal_query = world.query::<&A>();
+//! assert_eq!(2, normal_query.iter(&world).count());
+//!
+//! let mut prefab_query = world.query_filtered::<&A, With<Prefab>>();
+//! assert_eq!(1, prefab_query.iter(&world).count());
+//!
+//! let mut maybe_prefab_query = world.query::<(&A, Has<Prefab>)>();
+//! assert_eq!(3, maybe_prefab_query.iter(&world).count());
+//! ```
+//!
+//! ## Default query filters
+//!
+//! In Bevy, entity disabling is implemented through the construction of a global "default query filter".
+//! Queries which do not explicitly mention the disabled component will not include entities with that component.
+//! If an entity has multiple disabling components, it will only be included in queries that mention all of them.
+//!
+//! For example, `Query<&Position>` will not include entities with the [`Disabled`] component,
+//! even if they have a `Position` component,
+//! but `Query<&Position, With<Disabled>>` or `Query<(&Position, Has<Disabled>)>` will see them.
+//!
+//! Entities with disabling components are still present in the [`World`] and can be accessed directly,
+//! using methods on [`World`] or [`Commands`](crate::prelude::Commands).
+//!
+//! ### Warnings
+//!
+//! Currently, only queries for which the cache is built after enabling a default query filter will have entities
 //! with those components filtered.
As a result, they should generally only be modified before the //! app starts. //! @@ -16,6 +57,11 @@ //! the enire [`World`], especially when they cause queries to mix sparse and table components. //! See [`Query` performance] for more info. //! +//! Custom disabling components can cause significant interoperability issues within the ecosystem, +//! as users must be aware of each disabling component in use. +//! Libraries should think carefully about whether they need to use a new disabling component, +//! and clearly communicate their presence to their users to avoid the new for library compatibility flags. +//! //! [`With`]: crate::prelude::With //! [`Has`]: crate::prelude::Has //! [`World`]: crate::prelude::World @@ -24,44 +70,121 @@ use crate::{ component::{ComponentId, Components, StorageType}, query::FilteredAccess, + world::{FromWorld, World}, }; use bevy_ecs_macros::{Component, Resource}; +use smallvec::SmallVec; #[cfg(feature = "bevy_reflect")] -use {crate::reflect::ReflectComponent, bevy_reflect::Reflect}; +use { + crate::reflect::ReflectComponent, bevy_reflect::std_traits::ReflectDefault, + bevy_reflect::Reflect, +}; -/// A marker component for disabled entities. See [the module docs] for more info. +/// A marker component for disabled entities. +/// +/// Semantically, this component is used to mark entities that are temporarily disabled (typically for gameplay reasons), +/// but will likely be re-enabled at some point. +/// +/// Like all disabling components, this only disables the entity itself, +/// not its children or other entities that reference it. +/// To disable an entire tree of entities, use [`EntityCommands::insert_recursive`](crate::prelude::EntityCommands::insert_recursive). +/// +/// Every [`World`] has a default query filter that excludes entities with this component, +/// registered in the [`DefaultQueryFilters`] resource. +/// See [the module docs] for more info. /// /// [the module docs]: crate::entity_disabling -#[derive(Component)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component))] +#[derive(Component, Clone, Debug, Default)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Component), + reflect(Debug, Clone, Default) +)] +// This component is registered as a disabling component during World::bootstrap pub struct Disabled; -/// The default filters for all queries, these are used to globally exclude entities from queries. +/// Default query filters work by excluding entities with certain components from most queries. +/// +/// If a query does not explicitly mention a given disabling component, it will not include entities with that component. +/// To be more precise, this checks if the query's [`FilteredAccess`] contains the component, +/// and if it does not, adds a [`Without`](crate::prelude::Without) filter for that component to the query. +/// +/// This resource is initialized in the [`World`] whenever a new world is created, +/// with the [`Disabled`] component as a disabling component. +/// +/// Note that you can remove default query filters by overwriting the [`DefaultQueryFilters`] resource. +/// This can be useful as a last resort escape hatch, but is liable to break compatibility with other libraries. +/// /// See the [module docs](crate::entity_disabling) for more info. -#[derive(Resource, Default, Debug)] +/// +/// +/// # Warning +/// +/// Default query filters are a global setting that affects all queries in the [`World`], +/// and incur a small performance cost for each query. 
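A minimal sketch of the escape hatch described above: overwriting the `DefaultQueryFilters` resource with a custom set of disabling components. `DefaultQueryFilters::empty` and `register_disabling_component` are taken from this diff; the `Hidden` component and the module path are assumptions for illustration, and per the warning this should run before any queries are cached:

```rust
use bevy_ecs::entity_disabling::DefaultQueryFilters;
use bevy_ecs::prelude::*;

// Hypothetical disabling component for this sketch.
#[derive(Component)]
struct Hidden;

fn replace_default_filters(world: &mut World) {
    // Start from an empty set: this drops the built-in `Disabled` filter,
    // which the docs describe as a last-resort escape hatch.
    let mut filters = DefaultQueryFilters::empty();

    // Register our own disabling component by its `ComponentId`.
    let hidden_id = world.register_component::<Hidden>();
    filters.register_disabling_component(hidden_id);

    // Overwrite the resource; do this before the app starts so cached
    // queries pick up the new filters.
    world.insert_resource(filters);
}
```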
+/// +/// They can cause significant interoperability issues within the ecosystem, +/// as users must be aware of each disabling component in use. +/// +/// Think carefully about whether you need to use a new disabling component, +/// and clearly communicate their presence in any libraries you publish. +#[derive(Resource, Debug)] #[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] pub struct DefaultQueryFilters { - disabled: Option, + // We only expect a few components per application to act as disabling components, so we use a SmallVec here + // to avoid heap allocation in most cases. + disabling: SmallVec<[ComponentId; 4]>, +} + +impl FromWorld for DefaultQueryFilters { + fn from_world(world: &mut World) -> Self { + let mut filters = DefaultQueryFilters::empty(); + let disabled_component_id = world.register_component::(); + filters.register_disabling_component(disabled_component_id); + filters + } } impl DefaultQueryFilters { - /// Set the [`ComponentId`] for the entity disabling marker - pub(crate) fn set_disabled(&mut self, component_id: ComponentId) -> Option<()> { - if self.disabled.is_some() { - return None; + /// Creates a new, completely empty [`DefaultQueryFilters`]. + /// + /// This is provided as an escape hatch; in most cases you should initialize this using [`FromWorld`], + /// which is automatically called when creating a new [`World`]. + #[must_use] + pub fn empty() -> Self { + DefaultQueryFilters { + disabling: SmallVec::new(), } - self.disabled = Some(component_id); - Some(()) } - /// Get an iterator over all currently enabled filter components - pub fn ids(&self) -> impl Iterator { - [self.disabled].into_iter().flatten() + /// Adds this [`ComponentId`] to the set of [`DefaultQueryFilters`], + /// causing entities with this component to be excluded from queries. + /// + /// This method is idempotent, and will not add the same component multiple times. + /// + /// # Warning + /// + /// This method should only be called before the app starts, as it will not affect queries + /// initialized before it is called. + /// + /// As discussed in the [module docs](crate::entity_disabling), this can have performance implications, + /// as well as create interoperability issues, and should be used with caution. + pub fn register_disabling_component(&mut self, component_id: ComponentId) { + if !self.disabling.contains(&component_id) { + self.disabling.push(component_id); + } } - pub(super) fn apply(&self, component_access: &mut FilteredAccess) { - for component_id in self.ids() { + /// Get an iterator over all of the components which disable entities when present. + pub fn disabling_ids(&self) -> impl Iterator + use<'_> { + self.disabling.iter().copied() + } + + /// Modifies the provided [`FilteredAccess`] to include the filters from this [`DefaultQueryFilters`]. 
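A sketch of registering an additional disabling component through the resource itself; the `Internal` marker is hypothetical, and this should be equivalent to calling `World::register_disabling_component::<Internal>()`.

```rust
use bevy_ecs::entity_disabling::DefaultQueryFilters;
use bevy_ecs::prelude::*;

// Hypothetical marker used only for this illustration.
#[derive(Component)]
struct Internal;

fn main() {
    let mut world = World::new();
    let internal_id = world.register_component::<Internal>();

    world
        .resource_mut::<DefaultQueryFilters>()
        .register_disabling_component(internal_id);

    world.spawn(Internal);
    world.spawn_empty();

    // Entities carrying `Internal` are now skipped by ordinary queries.
    let mut query = world.query::<Entity>();
    assert_eq!(1, query.iter(&world).count());
}
```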
+ pub(super) fn modify_access(&self, component_access: &mut FilteredAccess) { + for component_id in self.disabling_ids() { if !component_access.contains(component_id) { component_access.and_without(component_id); } @@ -69,7 +192,7 @@ impl DefaultQueryFilters { } pub(super) fn is_dense(&self, components: &Components) -> bool { - self.ids().all(|component_id| { + self.disabling_ids().all(|component_id| { components .get_info(component_id) .is_some_and(|info| info.storage_type() == StorageType::Table) @@ -81,24 +204,16 @@ impl DefaultQueryFilters { mod tests { use super::*; + use crate::{ + prelude::World, + query::{Has, With}, + }; use alloc::{vec, vec::Vec}; #[test] - fn test_set_filters() { - let mut filters = DefaultQueryFilters::default(); - assert_eq!(0, filters.ids().count()); - - assert!(filters.set_disabled(ComponentId::new(1)).is_some()); - assert!(filters.set_disabled(ComponentId::new(3)).is_none()); - - assert_eq!(1, filters.ids().count()); - assert_eq!(Some(ComponentId::new(1)), filters.ids().next()); - } - - #[test] - fn test_apply_filters() { - let mut filters = DefaultQueryFilters::default(); - filters.set_disabled(ComponentId::new(1)); + fn filters_modify_access() { + let mut filters = DefaultQueryFilters::empty(); + filters.register_disabling_component(ComponentId::new(1)); // A component access with an unrelated component let mut component_access = FilteredAccess::::default(); @@ -107,7 +222,7 @@ mod tests { .add_component_read(ComponentId::new(2)); let mut applied_access = component_access.clone(); - filters.apply(&mut applied_access); + filters.modify_access(&mut applied_access); assert_eq!(0, applied_access.with_filters().count()); assert_eq!( vec![ComponentId::new(1)], @@ -118,7 +233,7 @@ mod tests { component_access.and_with(ComponentId::new(4)); let mut applied_access = component_access.clone(); - filters.apply(&mut applied_access); + filters.modify_access(&mut applied_access); assert_eq!( vec![ComponentId::new(4)], applied_access.with_filters().collect::>() @@ -133,7 +248,7 @@ mod tests { component_access.and_with(ComponentId::new(1)); let mut applied_access = component_access.clone(); - filters.apply(&mut applied_access); + filters.modify_access(&mut applied_access); assert_eq!( vec![ComponentId::new(1), ComponentId::new(4)], applied_access.with_filters().collect::>() @@ -147,11 +262,46 @@ mod tests { .add_archetypal(ComponentId::new(1)); let mut applied_access = component_access.clone(); - filters.apply(&mut applied_access); + filters.modify_access(&mut applied_access); assert_eq!( vec![ComponentId::new(4)], applied_access.with_filters().collect::>() ); assert_eq!(0, applied_access.without_filters().count()); } + + #[derive(Component)] + struct CustomDisabled; + + #[test] + fn multiple_disabling_components() { + let mut world = World::new(); + world.register_disabling_component::(); + + world.spawn_empty(); + world.spawn(Disabled); + world.spawn(CustomDisabled); + world.spawn((Disabled, CustomDisabled)); + + let mut query = world.query::<()>(); + assert_eq!(1, query.iter(&world).count()); + + let mut query = world.query_filtered::<(), With>(); + assert_eq!(1, query.iter(&world).count()); + + let mut query = world.query::>(); + assert_eq!(2, query.iter(&world).count()); + + let mut query = world.query_filtered::<(), With>(); + assert_eq!(1, query.iter(&world).count()); + + let mut query = world.query::>(); + assert_eq!(2, query.iter(&world).count()); + + let mut query = world.query_filtered::<(), (With, With)>(); + assert_eq!(1, query.iter(&world).count()); + + let 
mut query = world.query::<(Has, Has)>(); + assert_eq!(4, query.iter(&world).count()); + } } diff --git a/crates/bevy_ecs/src/error/bevy_error.rs b/crates/bevy_ecs/src/error/bevy_error.rs new file mode 100644 index 0000000000..0686e68f1d --- /dev/null +++ b/crates/bevy_ecs/src/error/bevy_error.rs @@ -0,0 +1,249 @@ +use alloc::boxed::Box; +use core::{ + error::Error, + fmt::{Debug, Display}, +}; + +/// The built in "universal" Bevy error type. This has a blanket [`From`] impl for any type that implements Rust's [`Error`], +/// meaning it can be used as a "catch all" error. +/// +/// # Backtraces +/// +/// When used with the `backtrace` Cargo feature, it will capture a backtrace when the error is constructed (generally in the [`From`] impl]). +/// When printed, the backtrace will be displayed. By default, the backtrace will be trimmed down to filter out noise. To see the full backtrace, +/// set the `BEVY_BACKTRACE=full` environment variable. +/// +/// # Usage +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// +/// fn fallible_system() -> Result<(), BevyError> { +/// // This will result in Rust's built-in ParseIntError, which will automatically +/// // be converted into a BevyError. +/// let parsed: usize = "I am not a number".parse()?; +/// Ok(()) +/// } +/// ``` +pub struct BevyError { + inner: Box, +} + +impl BevyError { + /// Attempts to downcast the internal error to the given type. + pub fn downcast_ref(&self) -> Option<&E> { + self.inner.error.downcast_ref::() + } + + fn format_backtrace(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + #[cfg(feature = "backtrace")] + { + let f = _f; + let backtrace = &self.inner.backtrace; + if let std::backtrace::BacktraceStatus::Captured = backtrace.status() { + let full_backtrace = std::env::var("BEVY_BACKTRACE").is_ok_and(|val| val == "full"); + + let backtrace_str = alloc::string::ToString::to_string(backtrace); + let mut skip_next_location_line = false; + for line in backtrace_str.split('\n') { + if !full_backtrace { + if skip_next_location_line { + if line.starts_with(" at") { + continue; + } + skip_next_location_line = false; + } + if line.contains("std::backtrace_rs::backtrace::") { + skip_next_location_line = true; + continue; + } + if line.contains("std::backtrace::Backtrace::") { + skip_next_location_line = true; + continue; + } + if line.contains(">::from") { + skip_next_location_line = true; + continue; + } + if line.contains(" as core::ops::try_trait::FromResidual>>::from_residual") { + skip_next_location_line = true; + continue; + } + if line.contains("__rust_begin_short_backtrace") { + break; + } + if line.contains("bevy_ecs::observer::Observers::invoke::{{closure}}") { + break; + } + } + writeln!(f, "{}", line)?; + } + if !full_backtrace { + if std::thread::panicking() { + SKIP_NORMAL_BACKTRACE.set(true); + } + writeln!(f, "{FILTER_MESSAGE}")?; + } + } + } + Ok(()) + } +} + +/// This type exists (rather than having a `BevyError(Box, + #[cfg(feature = "backtrace")] + backtrace: std::backtrace::Backtrace, +} + +// NOTE: writing the impl this way gives us From<&str> ... nice! 
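A small sketch of the conversion and downcasting behavior described here, including the `From<&str>` case the note above mentions:

```rust
use bevy_ecs::error::BevyError;
use core::num::ParseIntError;

fn main() {
    // Any std error converts through the blanket `From` impl...
    let parse_error: BevyError = "oops".parse::<usize>().unwrap_err().into();
    assert!(parse_error.downcast_ref::<ParseIntError>().is_some());

    // ...and so does a plain &str, which becomes a type-erased message error.
    let message_error: BevyError = "something went wrong".into();
    assert!(message_error.downcast_ref::<ParseIntError>().is_none());
}
```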
+impl From for BevyError +where + Box: From, +{ + #[cold] + fn from(error: E) -> Self { + BevyError { + inner: Box::new(InnerBevyError { + error: error.into(), + #[cfg(feature = "backtrace")] + backtrace: std::backtrace::Backtrace::capture(), + }), + } + } +} + +impl Display for BevyError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "{}", self.inner.error)?; + self.format_backtrace(f)?; + Ok(()) + } +} + +impl Debug for BevyError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "{:?}", self.inner.error)?; + self.format_backtrace(f)?; + Ok(()) + } +} + +#[cfg(feature = "backtrace")] +const FILTER_MESSAGE: &str = "note: Some \"noisy\" backtrace lines have been filtered out. Run with `BEVY_BACKTRACE=full` for a verbose backtrace."; + +#[cfg(feature = "backtrace")] +std::thread_local! { + static SKIP_NORMAL_BACKTRACE: core::cell::Cell = + const { core::cell::Cell::new(false) }; +} + +/// When called, this will skip the currently configured panic hook when a [`BevyError`] backtrace has already been printed. +#[cfg(feature = "backtrace")] +#[expect(clippy::print_stdout, reason = "Allowed behind `std` feature gate.")] +pub fn bevy_error_panic_hook( + current_hook: impl Fn(&std::panic::PanicHookInfo), +) -> impl Fn(&std::panic::PanicHookInfo) { + move |info| { + if SKIP_NORMAL_BACKTRACE.replace(false) { + if let Some(payload) = info.payload().downcast_ref::<&str>() { + std::println!("{payload}"); + } else if let Some(payload) = info.payload().downcast_ref::() { + std::println!("{payload}"); + } + return; + } + + current_hook(info); + } +} + +#[cfg(test)] +mod tests { + + #[test] + #[cfg(not(miri))] // miri backtraces are weird + #[cfg(not(windows))] // the windows backtrace in this context is ... unhelpful and not worth testing + fn filtered_backtrace_test() { + fn i_fail() -> crate::error::Result { + let _: usize = "I am not a number".parse()?; + Ok(()) + } + + // SAFETY: this is not safe ... this test could run in parallel with another test + // that writes the environment variable. We either accept that so we can write this test, + // or we don't. + + unsafe { std::env::set_var("RUST_BACKTRACE", "1") }; + + let error = i_fail().err().unwrap(); + let debug_message = alloc::format!("{error:?}"); + let mut lines = debug_message.lines().peekable(); + assert_eq!( + "ParseIntError { kind: InvalidDigit }", + lines.next().unwrap() + ); + + // On mac backtraces can start with Backtrace::create + let mut skip = false; + if let Some(line) = lines.peek() { + if &line[6..] == "std::backtrace::Backtrace::create" { + skip = true; + } + } + + if skip { + lines.next().unwrap(); + } + + let expected_lines = alloc::vec![ + "bevy_ecs::error::bevy_error::tests::filtered_backtrace_test::i_fail", + "bevy_ecs::error::bevy_error::tests::filtered_backtrace_test", + "bevy_ecs::error::bevy_error::tests::filtered_backtrace_test::{{closure}}", + "core::ops::function::FnOnce::call_once", + ]; + + for expected in expected_lines { + let line = lines.next().unwrap(); + assert_eq!(&line[6..], expected); + let mut skip = false; + if let Some(line) = lines.peek() { + if line.starts_with(" at") { + skip = true; + } + } + + if skip { + lines.next().unwrap(); + } + } + + // on linux there is a second call_once + let mut skip = false; + if let Some(line) = lines.peek() { + if &line[6..] 
== "core::ops::function::FnOnce::call_once" { + skip = true; + } + } + if skip { + lines.next().unwrap(); + } + let mut skip = false; + if let Some(line) = lines.peek() { + if line.starts_with(" at") { + skip = true; + } + } + + if skip { + lines.next().unwrap(); + } + assert_eq!(super::FILTER_MESSAGE, lines.next().unwrap()); + assert!(lines.next().is_none()); + } +} diff --git a/crates/bevy_ecs/src/error/command_handling.rs b/crates/bevy_ecs/src/error/command_handling.rs new file mode 100644 index 0000000000..d85ad4a87e --- /dev/null +++ b/crates/bevy_ecs/src/error/command_handling.rs @@ -0,0 +1,120 @@ +use core::{any::type_name, fmt}; + +use crate::{ + entity::Entity, + never::Never, + system::{entity_command::EntityCommandError, Command, EntityCommand}, + world::{error::EntityMutableFetchError, World}, +}; + +use super::{default_error_handler, BevyError, ErrorContext}; + +/// Takes a [`Command`] that returns a Result and uses a given error handler function to convert it into +/// a [`Command`] that internally handles an error if it occurs and returns `()`. +pub trait HandleError { + /// Takes a [`Command`] that returns a Result and uses a given error handler function to convert it into + /// a [`Command`] that internally handles an error if it occurs and returns `()`. + fn handle_error_with(self, error_handler: fn(BevyError, ErrorContext)) -> impl Command; + /// Takes a [`Command`] that returns a Result and uses the default error handler function to convert it into + /// a [`Command`] that internally handles an error if it occurs and returns `()`. + fn handle_error(self) -> impl Command + where + Self: Sized, + { + self.handle_error_with(default_error_handler()) + } +} + +impl HandleError> for C +where + C: Command>, + E: Into, +{ + fn handle_error_with(self, error_handler: fn(BevyError, ErrorContext)) -> impl Command { + move |world: &mut World| match self.apply(world) { + Ok(_) => {} + Err(err) => (error_handler)( + err.into(), + ErrorContext::Command { + name: type_name::().into(), + }, + ), + } + } +} + +impl HandleError for C +where + C: Command, +{ + fn handle_error_with(self, _error_handler: fn(BevyError, ErrorContext)) -> impl Command { + move |world: &mut World| { + self.apply(world); + } + } +} + +impl HandleError for C +where + C: Command, +{ + #[inline] + fn handle_error_with(self, _error_handler: fn(BevyError, ErrorContext)) -> impl Command { + self + } + #[inline] + fn handle_error(self) -> impl Command + where + Self: Sized, + { + self + } +} + +/// Passes in a specific entity to an [`EntityCommand`], resulting in a [`Command`] that +/// internally runs the [`EntityCommand`] on that entity. +/// +// NOTE: This is a separate trait from `EntityCommand` because "result-returning entity commands" and +// "non-result returning entity commands" require different implementations, so they cannot be automatically +// implemented. And this isn't the type of implementation that we want to thrust on people implementing +// EntityCommand. +pub trait CommandWithEntity { + /// Passes in a specific entity to an [`EntityCommand`], resulting in a [`Command`] that + /// internally runs the [`EntityCommand`] on that entity. 
+ fn with_entity(self, entity: Entity) -> impl Command + HandleError; +} + +impl CommandWithEntity> for C +where + C: EntityCommand, +{ + fn with_entity( + self, + entity: Entity, + ) -> impl Command> + + HandleError> { + move |world: &mut World| -> Result<(), EntityMutableFetchError> { + let entity = world.get_entity_mut(entity)?; + self.apply(entity); + Ok(()) + } + } +} + +impl CommandWithEntity>> for C +where + C: EntityCommand>, + Err: fmt::Debug + fmt::Display + Send + Sync + 'static, +{ + fn with_entity( + self, + entity: Entity, + ) -> impl Command>> + HandleError>> + { + move |world: &mut World| { + let entity = world.get_entity_mut(entity)?; + self.apply(entity) + .map_err(EntityCommandError::CommandFailed) + } + } +} diff --git a/crates/bevy_ecs/src/error/handler.rs b/crates/bevy_ecs/src/error/handler.rs new file mode 100644 index 0000000000..688b599473 --- /dev/null +++ b/crates/bevy_ecs/src/error/handler.rs @@ -0,0 +1,183 @@ +#[cfg(feature = "configurable_error_handler")] +use bevy_platform::sync::OnceLock; +use core::fmt::Display; + +use crate::{component::Tick, error::BevyError}; +use alloc::borrow::Cow; + +/// Context for a [`BevyError`] to aid in debugging. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum ErrorContext { + /// The error occurred in a system. + System { + /// The name of the system that failed. + name: Cow<'static, str>, + /// The last tick that the system was run. + last_run: Tick, + }, + /// The error occurred in a run condition. + RunCondition { + /// The name of the run condition that failed. + name: Cow<'static, str>, + /// The last tick that the run condition was evaluated. + last_run: Tick, + }, + /// The error occurred in a command. + Command { + /// The name of the command that failed. + name: Cow<'static, str>, + }, + /// The error occurred in an observer. + Observer { + /// The name of the observer that failed. + name: Cow<'static, str>, + /// The last tick that the observer was run. + last_run: Tick, + }, +} + +impl Display for ErrorContext { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::System { name, .. } => { + write!(f, "System `{}` failed", name) + } + Self::Command { name } => write!(f, "Command `{}` failed", name), + Self::Observer { name, .. } => { + write!(f, "Observer `{}` failed", name) + } + Self::RunCondition { name, .. } => { + write!(f, "Run condition `{}` failed", name) + } + } + } +} + +impl ErrorContext { + /// The name of the ECS construct that failed. + pub fn name(&self) -> &str { + match self { + Self::System { name, .. } + | Self::Command { name, .. } + | Self::Observer { name, .. } + | Self::RunCondition { name, .. } => name, + } + } + + /// A string representation of the kind of ECS construct that failed. + /// + /// This is a simpler helper used for logging. + pub fn kind(&self) -> &str { + match self { + Self::System { .. } => "system", + Self::Command { .. } => "command", + Self::Observer { .. } => "observer", + Self::RunCondition { .. } => "run condition", + } + } +} + +/// A global error handler. This can be set at startup, as long as it is set before +/// any uses. This should generally be configured _before_ initializing the app. +/// +/// This should be set inside of your `main` function, before initializing the Bevy app. +/// The value of this error handler can be accessed using the [`default_error_handler`] function, +/// which calls [`OnceLock::get_or_init`] to get the value. 
+/// +/// **Note:** this is only available when the `configurable_error_handler` feature of `bevy_ecs` (or `bevy`) is enabled! +/// +/// # Example +/// +/// ``` +/// # use bevy_ecs::error::{GLOBAL_ERROR_HANDLER, warn}; +/// GLOBAL_ERROR_HANDLER.set(warn).expect("The error handler can only be set once, globally."); +/// // initialize Bevy App here +/// ``` +/// +/// To use this error handler in your app for custom error handling logic: +/// +/// ```rust +/// use bevy_ecs::error::{default_error_handler, GLOBAL_ERROR_HANDLER, BevyError, ErrorContext, panic}; +/// +/// fn handle_errors(error: BevyError, ctx: ErrorContext) { +/// let error_handler = default_error_handler(); +/// error_handler(error, ctx); +/// } +/// ``` +/// +/// # Warning +/// +/// As this can *never* be overwritten, library code should never set this value. +#[cfg(feature = "configurable_error_handler")] +pub static GLOBAL_ERROR_HANDLER: OnceLock = OnceLock::new(); + +/// The default error handler. This defaults to [`panic()`], +/// but if set, the [`GLOBAL_ERROR_HANDLER`] will be used instead, enabling error handler customization. +/// The `configurable_error_handler` feature must be enabled to change this from the panicking default behavior, +/// as there may be runtime overhead. +#[inline] +pub fn default_error_handler() -> fn(BevyError, ErrorContext) { + #[cfg(not(feature = "configurable_error_handler"))] + return panic; + + #[cfg(feature = "configurable_error_handler")] + return *GLOBAL_ERROR_HANDLER.get_or_init(|| panic); +} + +macro_rules! inner { + ($call:path, $e:ident, $c:ident) => { + $call!( + "Encountered an error in {} `{}`: {}", + $c.kind(), + $c.name(), + $e + ); + }; +} + +/// Error handler that panics with the system error. +#[track_caller] +#[inline] +pub fn panic(error: BevyError, ctx: ErrorContext) { + inner!(panic, error, ctx); +} + +/// Error handler that logs the system error at the `error` level. +#[track_caller] +#[inline] +pub fn error(error: BevyError, ctx: ErrorContext) { + inner!(log::error, error, ctx); +} + +/// Error handler that logs the system error at the `warn` level. +#[track_caller] +#[inline] +pub fn warn(error: BevyError, ctx: ErrorContext) { + inner!(log::warn, error, ctx); +} + +/// Error handler that logs the system error at the `info` level. +#[track_caller] +#[inline] +pub fn info(error: BevyError, ctx: ErrorContext) { + inner!(log::info, error, ctx); +} + +/// Error handler that logs the system error at the `debug` level. +#[track_caller] +#[inline] +pub fn debug(error: BevyError, ctx: ErrorContext) { + inner!(log::debug, error, ctx); +} + +/// Error handler that logs the system error at the `trace` level. +#[track_caller] +#[inline] +pub fn trace(error: BevyError, ctx: ErrorContext) { + inner!(log::trace, error, ctx); +} + +/// Error handler that ignores the system error. +#[track_caller] +#[inline] +pub fn ignore(_: BevyError, _: ErrorContext) {} diff --git a/crates/bevy_ecs/src/error/mod.rs b/crates/bevy_ecs/src/error/mod.rs new file mode 100644 index 0000000000..950deee3ec --- /dev/null +++ b/crates/bevy_ecs/src/error/mod.rs @@ -0,0 +1,81 @@ +//! Error handling for Bevy systems, commands, and observers. +//! +//! When a system is added to a [`Schedule`], and its return type is that of [`Result`], then Bevy +//! considers those systems to be "fallible", and the ECS scheduler will special-case the [`Err`] +//! variant of the returned `Result`. +//! +//! All [`BevyError`]s returned by a system, observer or command are handled by an "error handler". By default, the +//! 
[`panic`] error handler function is used, resulting in a panic with the error message attached. +//! +//! You can change the default behavior by registering a custom error handler. +//! Modify the [`GLOBAL_ERROR_HANDLER`] value to set a custom error handler function for your entire app. +//! In practice, this is generally feature-flagged: panicking or loudly logging errors in development, +//! and quietly logging or ignoring them in production to avoid crashing the app. +//! +//! Bevy provides a number of pre-built error-handlers for you to use: +//! +//! - [`panic`] – panics with the system error +//! - [`error`] – logs the system error at the `error` level +//! - [`warn`] – logs the system error at the `warn` level +//! - [`info`] – logs the system error at the `info` level +//! - [`debug`] – logs the system error at the `debug` level +//! - [`trace`] – logs the system error at the `trace` level +//! - [`ignore`] – ignores the system error +//! +//! However, you can use any custom error handler logic by providing your own function (or +//! non-capturing closure that coerces to the function signature) as long as it matches the +//! signature: +//! +//! ```rust,ignore +//! fn(BevyError, ErrorContext) +//! ``` +//! +//! The [`ErrorContext`] allows you to access additional details relevant to providing +//! context surrounding the error – such as the system's [`name`] – in your error messages. +//! +//! Remember to turn on the `configurable_error_handler` feature to set a global error handler! +//! +//! ```rust, ignore +//! use bevy_ecs::error::{GLOBAL_ERROR_HANDLER, BevyError, ErrorContext}; +//! use log::trace; +//! +//! fn my_error_handler(error: BevyError, ctx: ErrorContext) { +//! if ctx.name().ends_with("plz_ignore") { +//! trace!("Nothing to see here, move along."); +//! return; +//! } +//! bevy_ecs::error::error(error, ctx); +//! } +//! +//! fn main() { +//! // This requires the "configurable_error_handler" feature to be enabled to be in scope. +//! GLOBAL_ERROR_HANDLER.set(my_error_handler).expect("The error handler can only be set once."); +//! +//! // Initialize your Bevy App here +//! } +//! ``` +//! +//! If you need special handling of individual fallible systems, you can use Bevy's [`system piping +//! feature`] to capture the [`Result`] output of the system and handle it accordingly. +//! +//! When working with commands, you can handle the result of each command separately using the [`HandleError::handle_error_with`] method. +//! +//! [`Schedule`]: crate::schedule::Schedule +//! [`panic`]: panic() +//! [`World`]: crate::world::World +//! [`System`]: crate::system::System +//! [`name`]: crate::system::System::name +//! [`system piping feature`]: crate::system::In + +mod bevy_error; +mod command_handling; +mod handler; + +pub use bevy_error::*; +pub use command_handling::*; +pub use handler::*; + +/// A result type for use in fallible systems, commands and observers. +/// +/// The [`BevyError`] type is a type-erased error type with optional Bevy-specific diagnostics. 
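A minimal sketch of a fallible system using this alias; `RawConfig` is a hypothetical resource, and the `?` operator converts the `ParseIntError` into a `BevyError` for the configured handler.

```rust
use bevy_ecs::prelude::*;

#[derive(Resource)]
struct RawConfig(String);

fn parse_config(config: Res<RawConfig>) -> Result {
    let _threshold: usize = config.0.parse()?;
    Ok(())
}

fn main() {
    bevy_ecs::system::assert_is_system(parse_config);
}
```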
+pub type Result = core::result::Result; diff --git a/crates/bevy_ecs/src/event/base.rs b/crates/bevy_ecs/src/event/base.rs index 2b5c090504..d525ba2e57 100644 --- a/crates/bevy_ecs/src/event/base.rs +++ b/crates/bevy_ecs/src/event/base.rs @@ -1,10 +1,9 @@ +use crate::change_detection::MaybeLocation; use crate::component::ComponentId; use crate::world::World; use crate::{component::Component, traversal::Traversal}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -#[cfg(feature = "track_location")] -use core::panic::Location; use core::{ cmp::Ordering, fmt, @@ -19,10 +18,22 @@ use core::{ /// /// Events can also be "triggered" on a [`World`], which will then cause any [`Observer`] of that trigger to run. /// -/// This trait can be derived. -/// /// Events must be thread-safe. /// +/// ## Derive +/// This trait can be derived. +/// Adding `auto_propagate` sets [`Self::AUTO_PROPAGATE`] to true. +/// Adding `traversal = "X"` sets [`Self::Traversal`] to be of type "X". +/// +/// ``` +/// use bevy_ecs::prelude::*; +/// +/// #[derive(Event)] +/// #[event(auto_propagate)] +/// struct MyEvent; +/// ``` +/// +/// /// [`World`]: crate::world::World /// [`ComponentId`]: crate::component::ComponentId /// [`Observer`]: crate::observer::Observer @@ -82,7 +93,7 @@ pub trait Event: Send + Sync + 'static { /// /// This exists so we can easily get access to a unique [`ComponentId`] for each [`Event`] type, /// without requiring that [`Event`] types implement [`Component`] directly. -/// [`ComponentId`] is used internally as a unique identitifier for events because they are: +/// [`ComponentId`] is used internally as a unique identifier for events because they are: /// /// - Unique to each event type. /// - Can be quickly generated and looked up. @@ -99,15 +110,18 @@ struct EventWrapperComponent(PhantomData); /// sent to the point it was processed. `EventId`s increase monotonically by send order. /// /// [`World`]: crate::world::World -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, Debug, PartialEq, Hash) +)] pub struct EventId { /// Uniquely identifies the event associated with this ID. // This value corresponds to the order in which each event was added to the world. pub id: usize, /// The source code location that triggered this event. - #[cfg(feature = "track_location")] - pub caller: &'static Location<'static>, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + pub caller: MaybeLocation, + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(super) _marker: PhantomData, } diff --git a/crates/bevy_ecs/src/event/collections.rs b/crates/bevy_ecs/src/event/collections.rs index a62dcfb30c..66447b7de4 100644 --- a/crates/bevy_ecs/src/event/collections.rs +++ b/crates/bevy_ecs/src/event/collections.rs @@ -1,10 +1,9 @@ use alloc::vec::Vec; use bevy_ecs::{ + change_detection::MaybeLocation, event::{Event, EventCursor, EventId, EventInstance}, resource::Resource, }; -#[cfg(feature = "track_location")] -use core::panic::Location; use core::{ marker::PhantomData, ops::{Deref, DerefMut}, @@ -74,7 +73,7 @@ use { /// - [`EventReader`]s that read at least once per update will never drop events. /// - [`EventReader`]s that read once within two updates might still receive some events /// - [`EventReader`]s that read after two updates are guaranteed to drop all events that occurred -/// before those updates. +/// before those updates. 
/// /// The buffers in [`Events`] will grow indefinitely if [`update`](Events::update) is never called. /// @@ -123,21 +122,12 @@ impl Events { /// This method returns the [ID](`EventId`) of the sent `event`. #[track_caller] pub fn send(&mut self, event: E) -> EventId { - self.send_with_caller( - event, - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.send_with_caller(event, MaybeLocation::caller()) } - pub(crate) fn send_with_caller( - &mut self, - event: E, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, - ) -> EventId { + pub(crate) fn send_with_caller(&mut self, event: E, caller: MaybeLocation) -> EventId { let event_id = EventId { id: self.event_count, - #[cfg(feature = "track_location")] caller, _marker: PhantomData, }; @@ -307,8 +297,7 @@ impl Extend for Events { let events = iter.into_iter().map(|event| { let event_id = EventId { id: event_count, - #[cfg(feature = "track_location")] - caller: Location::caller(), + caller: MaybeLocation::caller(), _marker: PhantomData, }; event_count += 1; @@ -331,7 +320,7 @@ impl Extend for Events { } #[derive(Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Default))] pub(crate) struct EventSequence { pub(crate) events: Vec>, pub(crate) start_event_count: usize, @@ -378,8 +367,7 @@ impl Iterator for SendBatchIds { let result = Some(EventId { id: self.last_count, - #[cfg(feature = "track_location")] - caller: Location::caller(), + caller: MaybeLocation::caller(), _marker: PhantomData, }); diff --git a/crates/bevy_ecs/src/event/mutator.rs b/crates/bevy_ecs/src/event/mutator.rs index 0afbeaa00d..e95037af5b 100644 --- a/crates/bevy_ecs/src/event/mutator.rs +++ b/crates/bevy_ecs/src/event/mutator.rs @@ -44,6 +44,7 @@ use bevy_ecs::{ #[derive(SystemParam, Debug)] pub struct EventMutator<'w, 's, E: Event> { pub(super) reader: Local<'s, EventCursor>, + #[system_param(validation_message = "Event not initialized")] events: ResMut<'w, Events>, } diff --git a/crates/bevy_ecs/src/event/reader.rs b/crates/bevy_ecs/src/event/reader.rs index bc0f4f86bc..995e2ca9e9 100644 --- a/crates/bevy_ecs/src/event/reader.rs +++ b/crates/bevy_ecs/src/event/reader.rs @@ -16,6 +16,7 @@ use bevy_ecs::{ #[derive(SystemParam, Debug)] pub struct EventReader<'w, 's, E: Event> { pub(super) reader: Local<'s, EventCursor>, + #[system_param(validation_message = "Event not initialized")] events: Res<'w, Events>, } diff --git a/crates/bevy_ecs/src/event/writer.rs b/crates/bevy_ecs/src/event/writer.rs index ebdedce177..a1c42f8b60 100644 --- a/crates/bevy_ecs/src/event/writer.rs +++ b/crates/bevy_ecs/src/event/writer.rs @@ -14,14 +14,14 @@ use bevy_ecs::{ /// #[derive(Event)] /// pub struct MyEvent; // Custom event type. /// fn my_system(mut writer: EventWriter) { -/// writer.send(MyEvent); +/// writer.write(MyEvent); /// } /// /// # bevy_ecs::system::assert_is_system(my_system); /// ``` /// # Observers /// -/// "Buffered" Events, such as those sent directly in [`Events`] or sent using [`EventWriter`], do _not_ automatically +/// "Buffered" Events, such as those sent directly in [`Events`] or written using [`EventWriter`], do _not_ automatically /// trigger any [`Observer`]s watching for that event, as each [`Event`] has different requirements regarding _if_ it will /// be triggered, and if so, _when_ it will be triggered in the schedule. 
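To make the distinction concrete, a short sketch (not part of the patch) contrasting a buffered write with an observer trigger:

```rust
use bevy_ecs::prelude::*;

#[derive(Event)]
struct Explosion;

// Buffered: read later by `EventReader<Explosion>`; observers do NOT run.
fn buffered(mut writer: EventWriter<Explosion>) {
    writer.write(Explosion);
}

// Triggered: observers watching `Trigger<Explosion>` run when commands apply.
fn immediate(mut commands: Commands) {
    commands.trigger(Explosion);
}

fn main() {
    bevy_ecs::system::assert_is_system(buffered);
    bevy_ecs::system::assert_is_system(immediate);
}
```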
/// @@ -32,7 +32,7 @@ use bevy_ecs::{ /// /// # Untyped events /// -/// `EventWriter` can only send events of one specific type, which must be known at compile-time. +/// `EventWriter` can only write events of one specific type, which must be known at compile-time. /// This is not a problem most of the time, but you may find a situation where you cannot know /// ahead of time every kind of event you'll need to send. In this case, you can use the "type-erased event" pattern. /// @@ -60,17 +60,53 @@ use bevy_ecs::{ /// [`Observer`]: crate::observer::Observer #[derive(SystemParam)] pub struct EventWriter<'w, E: Event> { + #[system_param(validation_message = "Event not initialized")] events: ResMut<'w, Events>, } impl<'w, E: Event> EventWriter<'w, E> { + /// Writes an `event`, which can later be read by [`EventReader`](super::EventReader)s. + /// This method returns the [ID](`EventId`) of the written `event`. + /// + /// See [`Events`] for details. + #[doc(alias = "send")] + #[track_caller] + pub fn write(&mut self, event: E) -> EventId { + self.events.send(event) + } + + /// Sends a list of `events` all at once, which can later be read by [`EventReader`](super::EventReader)s. + /// This is more efficient than sending each event individually. + /// This method returns the [IDs](`EventId`) of the written `events`. + /// + /// See [`Events`] for details. + #[doc(alias = "send_batch")] + #[track_caller] + pub fn write_batch(&mut self, events: impl IntoIterator) -> SendBatchIds { + self.events.send_batch(events) + } + + /// Writes the default value of the event. Useful when the event is an empty struct. + /// This method returns the [ID](`EventId`) of the written `event`. + /// + /// See [`Events`] for details. + #[doc(alias = "send_default")] + #[track_caller] + pub fn write_default(&mut self) -> EventId + where + E: Default, + { + self.events.send_default() + } + /// Sends an `event`, which can later be read by [`EventReader`](super::EventReader)s. /// This method returns the [ID](`EventId`) of the sent `event`. /// /// See [`Events`] for details. + #[deprecated(since = "0.16.0", note = "Use `EventWriter::write` instead.")] #[track_caller] pub fn send(&mut self, event: E) -> EventId { - self.events.send(event) + self.write(event) } /// Sends a list of `events` all at once, which can later be read by [`EventReader`](super::EventReader)s. @@ -78,20 +114,22 @@ impl<'w, E: Event> EventWriter<'w, E> { /// This method returns the [IDs](`EventId`) of the sent `events`. /// /// See [`Events`] for details. + #[deprecated(since = "0.16.0", note = "Use `EventWriter::write_batch` instead.")] #[track_caller] pub fn send_batch(&mut self, events: impl IntoIterator) -> SendBatchIds { - self.events.send_batch(events) + self.write_batch(events) } /// Sends the default value of the event. Useful when the event is an empty struct. /// This method returns the [ID](`EventId`) of the sent `event`. /// /// See [`Events`] for details. 
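A brief sketch of the renamed writing methods used together; `Tick` is a placeholder event type.

```rust
use bevy_ecs::prelude::*;

#[derive(Event, Default)]
struct Tick;

fn produce_ticks(mut writer: EventWriter<Tick>) {
    writer.write(Tick);
    writer.write_batch([Tick, Tick]);
    writer.write_default();
}

fn main() {
    bevy_ecs::system::assert_is_system(produce_ticks);
}
```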
+ #[deprecated(since = "0.16.0", note = "Use `EventWriter::write_default` instead.")] #[track_caller] pub fn send_default(&mut self) -> EventId where E: Default, { - self.events.send_default() + self.write_default() } } diff --git a/crates/bevy_ecs/src/hierarchy.rs b/crates/bevy_ecs/src/hierarchy.rs index 6c8ee9f31b..9f4b0d0f8f 100644 --- a/crates/bevy_ecs/src/hierarchy.rs +++ b/crates/bevy_ecs/src/hierarchy.rs @@ -17,12 +17,16 @@ use crate::{ world::{DeferredWorld, EntityWorldMut, FromWorld, World}, }; use alloc::{format, string::String, vec::Vec}; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::std_traits::ReflectDefault; use core::ops::Deref; use core::slice; use disqualified::ShortName; use log::warn; -/// A [`Relationship`](crate::relationship::Relationship) component that creates the canonical +/// Stores the parent entity of this child entity with this component. +/// +/// This is a [`Relationship`] component, and creates the canonical /// "parent / child" hierarchy. This is the "source of truth" component, and it pairs with /// the [`Children`] [`RelationshipTarget`](crate::relationship::RelationshipTarget). /// @@ -31,7 +35,6 @@ use log::warn; /// 1. Organizing entities in a scene /// 2. Propagating configuration or data inherited from a parent, such as "visibility" or "world-space global transforms". /// 3. Ensuring a hierarchy is despawned when an entity is despawned. -/// 4. /// /// [`ChildOf`] contains a single "target" [`Entity`]. When [`ChildOf`] is inserted on a "source" entity, /// the "target" entity will automatically (and immediately, via a component hook) have a [`Children`] @@ -85,28 +88,30 @@ use log::warn; /// assert_eq!(&**world.entity(root).get::().unwrap(), &[child1, child2]); /// assert_eq!(&**world.entity(child1).get::().unwrap(), &[grandchild]); /// ``` +/// +/// [`Relationship`]: crate::relationship::Relationship #[derive(Component, Clone, PartialEq, Eq, Debug)] #[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] #[cfg_attr( feature = "bevy_reflect", - reflect(Component, PartialEq, Debug, FromWorld) + reflect(Component, PartialEq, Debug, FromWorld, Clone) )] #[relationship(relationship_target = Children)] +#[doc(alias = "IsChild", alias = "Parent")] pub struct ChildOf(pub Entity); impl ChildOf { - /// Returns the "target" entity. - pub fn get(&self) -> Entity { + /// The parent entity of this child entity. + #[inline] + pub fn parent(&self) -> Entity { self.0 } -} - -impl Deref for ChildOf { - type Target = Entity; + /// The parent entity of this child entity. + #[deprecated(since = "0.16.0", note = "Use child_of.parent() instead")] #[inline] - fn deref(&self) -> &Self::Target { - &self.0 + pub fn get(&self) -> Entity { + self.0 } } @@ -121,17 +126,122 @@ impl FromWorld for ChildOf { } } -/// A [`RelationshipTarget`](crate::relationship::RelationshipTarget) collection component that is populated -/// with entities that "target" this entity with the [`ChildOf`] [`Relationship`](crate::relationship::Relationship) component. +/// Tracks which entities are children of this parent entity. /// -/// Together, these components form the "canonical parent-child hierarchy". See the [`ChildOf`] component for all full +/// A [`RelationshipTarget`] collection component that is populated +/// with entities that "target" this entity with the [`ChildOf`] [`Relationship`] component. +/// +/// Together, these components form the "canonical parent-child hierarchy". 
See the [`ChildOf`] component for the full /// description of this relationship and instructions on how to use it. +/// +/// # Usage +/// +/// Like all [`RelationshipTarget`] components, this data should not be directly manipulated to avoid desynchronization. +/// Instead, modify the [`ChildOf`] components on the "source" entities. +/// +/// To access the children of an entity, you can iterate over the [`Children`] component, +/// using the [`IntoIterator`] trait. +/// For more complex access patterns, see the [`RelationshipTarget`] trait. +/// +/// [`Relationship`]: crate::relationship::Relationship +/// [`RelationshipTarget`]: crate::relationship::RelationshipTarget #[derive(Component, Default, Debug, PartialEq, Eq)] #[relationship_target(relationship = ChildOf, linked_spawn)] #[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] -#[cfg_attr(feature = "bevy_reflect", reflect(Component, FromWorld))] +#[cfg_attr(feature = "bevy_reflect", reflect(Component, FromWorld, Default))] +#[doc(alias = "IsParent")] pub struct Children(Vec); +impl Children { + /// Swaps the child at `a_index` with the child at `b_index`. + #[inline] + pub fn swap(&mut self, a_index: usize, b_index: usize) { + self.0.swap(a_index, b_index); + } + + /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided comparator function. + /// + /// For the underlying implementation, see [`slice::sort_by`]. + /// + /// For the unstable version, see [`sort_unstable_by`](Children::sort_unstable_by). + /// + /// See also [`sort_by_key`](Children::sort_by_key), [`sort_by_cached_key`](Children::sort_by_cached_key). + #[inline] + pub fn sort_by(&mut self, compare: F) + where + F: FnMut(&Entity, &Entity) -> core::cmp::Ordering, + { + self.0.sort_by(compare); + } + + /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided key extraction function. + /// + /// For the underlying implementation, see [`slice::sort_by_key`]. + /// + /// For the unstable version, see [`sort_unstable_by_key`](Children::sort_unstable_by_key). + /// + /// See also [`sort_by`](Children::sort_by), [`sort_by_cached_key`](Children::sort_by_cached_key). + #[inline] + pub fn sort_by_key(&mut self, compare: F) + where + F: FnMut(&Entity) -> K, + K: Ord, + { + self.0.sort_by_key(compare); + } + + /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided key extraction function. Only evaluates each key at most + /// once per sort, caching the intermediate results in memory. + /// + /// For the underlying implementation, see [`slice::sort_by_cached_key`]. + /// + /// See also [`sort_by`](Children::sort_by), [`sort_by_key`](Children::sort_by_key). + #[inline] + pub fn sort_by_cached_key(&mut self, compare: F) + where + F: FnMut(&Entity) -> K, + K: Ord, + { + self.0.sort_by_cached_key(compare); + } + + /// Sorts children [unstably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided comparator function. + /// + /// For the underlying implementation, see [`slice::sort_unstable_by`]. + /// + /// For the stable version, see [`sort_by`](Children::sort_by). + /// + /// See also [`sort_unstable_by_key`](Children::sort_unstable_by_key). 
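An illustrative sketch (not part of the patch) combining the renamed `parent()` accessor with one of the sorting helpers above; the final ordering assertion assumes a fresh world where entity indices increase in spawn order.

```rust
use bevy_ecs::prelude::*;

fn main() {
    let mut world = World::new();
    let parent = world.spawn_empty().id();
    let child_a = world.spawn(ChildOf(parent)).id();
    let child_b = world.spawn(ChildOf(parent)).id();

    // The renamed accessor replaces the deprecated `ChildOf::get`.
    let child_of = world.entity(child_a).get::<ChildOf>().unwrap();
    assert_eq!(parent, child_of.parent());

    // Reorder the children in place by descending entity index.
    let mut children = world.get_mut::<Children>(parent).unwrap();
    children.sort_by_key(|entity| core::cmp::Reverse(entity.index()));

    assert_eq!(
        &**world.entity(parent).get::<Children>().unwrap(),
        &[child_b, child_a]
    );
}
```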
+ #[inline] + pub fn sort_unstable_by(&mut self, compare: F) + where + F: FnMut(&Entity, &Entity) -> core::cmp::Ordering, + { + self.0.sort_unstable_by(compare); + } + + /// Sorts children [unstably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided key extraction function. + /// + /// For the underlying implementation, see [`slice::sort_unstable_by_key`]. + /// + /// For the stable version, see [`sort_by_key`](Children::sort_by_key). + /// + /// See also [`sort_unstable_by`](Children::sort_unstable_by). + #[inline] + pub fn sort_unstable_by_key(&mut self, compare: F) + where + F: FnMut(&Entity) -> K, + K: Ord, + { + self.0.sort_unstable_by_key(compare); + } +} + impl<'a> IntoIterator for &'a Children { type Item = ::Item; @@ -159,30 +269,72 @@ pub type ChildSpawnerCommands<'w> = RelatedSpawnerCommands<'w, ChildOf>; impl<'w> EntityWorldMut<'w> { /// Spawns children of this entity (with a [`ChildOf`] relationship) by taking a function that operates on a [`ChildSpawner`]. + /// See also [`with_related`](Self::with_related). pub fn with_children(&mut self, func: impl FnOnce(&mut ChildSpawner)) -> &mut Self { - self.with_related(func); + self.with_related_entities(func); self } /// Adds the given children to this entity + /// See also [`add_related`](Self::add_related). pub fn add_children(&mut self, children: &[Entity]) -> &mut Self { self.add_related::(children) } + /// Insert children at specific index. + /// See also [`insert_related`](Self::insert_related). + pub fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self { + self.insert_related::(index, children) + } + /// Adds the given child to this entity + /// See also [`add_related`](Self::add_related). pub fn add_child(&mut self, child: Entity) -> &mut Self { self.add_related::(&[child]) } + /// Removes the relationship between this entity and the given entities. + pub fn remove_children(&mut self, children: &[Entity]) -> &mut Self { + self.remove_related::(children) + } + + /// Replaces all the related children with a new set of children. + pub fn replace_children(&mut self, children: &[Entity]) -> &mut Self { + self.replace_related::(children) + } + + /// Replaces all the related children with a new set of children. + /// + /// # Warning + /// + /// Failing to maintain the functions invariants may lead to erratic engine behavior including random crashes. + /// Refer to [`Self::replace_related_with_difference`] for a list of these invariants. + /// + /// # Panics + /// + /// Panics when debug assertions are enabled if an invariant is is broken and the command is executed. + pub fn replace_children_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + self.replace_related_with_difference::( + entities_to_unrelate, + entities_to_relate, + newly_related_entities, + ) + } + /// Spawns the passed bundle and adds it to this entity as a child. /// /// For efficient spawning of multiple children, use [`with_children`]. 
/// /// [`with_children`]: EntityWorldMut::with_children pub fn with_child(&mut self, bundle: impl Bundle) -> &mut Self { - let id = self.id(); + let parent = self.id(); self.world_scope(|world| { - world.spawn((bundle, ChildOf(id))); + world.spawn((bundle, ChildOf(parent))); }); self } @@ -208,7 +360,7 @@ impl<'a> EntityCommands<'a> { &mut self, func: impl FnOnce(&mut RelatedSpawnerCommands), ) -> &mut Self { - self.with_related(func); + self.with_related_entities(func); self } @@ -217,19 +369,57 @@ impl<'a> EntityCommands<'a> { self.add_related::(children) } + /// Insert children at specific index. + /// See also [`insert_related`](Self::insert_related). + pub fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self { + self.insert_related::(index, children) + } + /// Adds the given child to this entity pub fn add_child(&mut self, child: Entity) -> &mut Self { self.add_related::(&[child]) } + /// Removes the relationship between this entity and the given entities. + pub fn remove_children(&mut self, children: &[Entity]) -> &mut Self { + self.remove_related::(children) + } + + /// Replaces the children on this entity with a new list of children. + pub fn replace_children(&mut self, children: &[Entity]) -> &mut Self { + self.replace_related::(children) + } + + /// Replaces all the related entities with a new set of entities. + /// + /// # Warning + /// + /// Failing to maintain the functions invariants may lead to erratic engine behavior including random crashes. + /// Refer to [`EntityWorldMut::replace_related_with_difference`] for a list of these invariants. + /// + /// # Panics + /// + /// Panics when debug assertions are enabled if an invariant is is broken and the command is executed. + pub fn replace_children_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + self.replace_related_with_difference::( + entities_to_unrelate, + entities_to_relate, + newly_related_entities, + ) + } + /// Spawns the passed bundle and adds it to this entity as a child. /// /// For efficient spawning of multiple children, use [`with_children`]. 
/// /// [`with_children`]: EntityCommands::with_children pub fn with_child(&mut self, bundle: impl Bundle) -> &mut Self { - let id = self.id(); - self.commands.spawn((bundle, ChildOf(id))); + self.with_related::(bundle); self } @@ -259,7 +449,7 @@ pub fn validate_parent_has_component( return; }; if !world - .get_entity(child_of.get()) + .get_entity(child_of.parent()) .is_ok_and(|e| e.contains::()) { // TODO: print name here once Name lives in bevy_ecs @@ -318,7 +508,7 @@ mod tests { use crate::{ entity::Entity, hierarchy::{ChildOf, Children}, - relationship::RelationshipTarget, + relationship::{RelationshipHookMode, RelationshipTarget}, spawn::{Spawn, SpawnRelated}, world::World, }; @@ -437,6 +627,55 @@ mod tests { ); } + #[test] + fn insert_children() { + let mut world = World::new(); + let child1 = world.spawn_empty().id(); + let child2 = world.spawn_empty().id(); + let child3 = world.spawn_empty().id(); + let child4 = world.spawn_empty().id(); + + let mut entity_world_mut = world.spawn_empty(); + + let first_children = entity_world_mut.add_children(&[child1, child2]); + + let root = first_children.insert_children(1, &[child3, child4]).id(); + + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, + Node::new_with( + root, + vec![ + Node::new(child1), + Node::new(child3), + Node::new(child4), + Node::new(child2) + ] + ) + ); + } + + #[test] + fn remove_children() { + let mut world = World::new(); + let child1 = world.spawn_empty().id(); + let child2 = world.spawn_empty().id(); + let child3 = world.spawn_empty().id(); + let child4 = world.spawn_empty().id(); + + let mut root = world.spawn_empty(); + root.add_children(&[child1, child2, child3, child4]); + root.remove_children(&[child2, child3]); + let root = root.id(); + + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, + Node::new_with(root, vec![Node::new(child1), Node::new(child4)]) + ); + } + #[test] fn self_parenting_invalid() { let mut world = World::new(); @@ -479,4 +718,312 @@ mod tests { let id = world.spawn(Children::spawn((Spawn(()), Spawn(())))).id(); assert_eq!(world.entity(id).get::().unwrap().len(), 2,); } + + #[test] + fn replace_children() { + let mut world = World::new(); + let parent = world.spawn(Children::spawn((Spawn(()), Spawn(())))).id(); + let &[child_a, child_b] = &world.entity(parent).get::().unwrap().0[..] 
else { + panic!("Tried to spawn 2 children on an entity and didn't get 2 children"); + }; + + let child_c = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children(&[child_a, child_c]); + + let children = world.entity(parent).get::().unwrap(); + + assert!(children.contains(&child_a)); + assert!(children.contains(&child_c)); + assert!(!children.contains(&child_b)); + + assert_eq!( + world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_c).get::().unwrap(), + &ChildOf(parent) + ); + assert!(world.entity(child_b).get::().is_none()); + } + + #[test] + fn replace_children_with_nothing() { + let mut world = World::new(); + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + + world.entity_mut(parent).add_children(&[child_a, child_b]); + + assert_eq!(world.entity(parent).get::().unwrap().len(), 2); + + world.entity_mut(parent).replace_children(&[]); + + assert!(world.entity(child_a).get::().is_none()); + assert!(world.entity(child_b).get::().is_none()); + } + + #[test] + fn insert_same_child_twice() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child = world.spawn_empty().id(); + + world.entity_mut(parent).add_child(child); + world.entity_mut(parent).add_child(child); + + let children = world.get::(parent).unwrap(); + assert_eq!(children.0, [child]); + assert_eq!( + world.entity(child).get::().unwrap(), + &ChildOf(parent) + ); + } + + #[test] + fn replace_with_difference() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + let child_c = world.spawn_empty().id(); + let child_d = world.spawn_empty().id(); + + // Test inserting new relations + world.entity_mut(parent).replace_children_with_difference( + &[], + &[child_a, child_b], + &[child_a, child_b], + ); + + assert_eq!( + world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_b).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_a, child_b] + ); + + // Test replacing relations and changing order + world.entity_mut(parent).replace_children_with_difference( + &[child_b], + &[child_d, child_c, child_a], + &[child_c, child_d], + ); + assert_eq!( + world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_c).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_d).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_d, child_c, child_a] + ); + assert!(!world.entity(child_b).contains::()); + + // Test removing relationships + world.entity_mut(parent).replace_children_with_difference( + &[child_a, child_d, child_c], + &[], + &[], + ); + assert!(!world.entity(parent).contains::()); + assert!(!world.entity(child_a).contains::()); + assert!(!world.entity(child_b).contains::()); + assert!(!world.entity(child_c).contains::()); + assert!(!world.entity(child_d).contains::()); + } + + #[test] + fn replace_with_difference_on_empty() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children_with_difference(&[child_a], &[], &[]); + + assert!(!world.entity(parent).contains::()); + assert!(!world.entity(child_a).contains::()); + } + + #[test] + fn 
replace_with_difference_totally_new_children() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + let child_c = world.spawn_empty().id(); + let child_d = world.spawn_empty().id(); + + // Test inserting new relations + world.entity_mut(parent).replace_children_with_difference( + &[], + &[child_a, child_b], + &[child_a, child_b], + ); + + assert_eq!( + world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_b).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_a, child_b] + ); + + // Test replacing relations and changing order + world.entity_mut(parent).replace_children_with_difference( + &[child_b, child_a], + &[child_d, child_c], + &[child_c, child_d], + ); + assert_eq!( + world.entity(child_c).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_d).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_d, child_c] + ); + assert!(!world.entity(child_a).contains::()); + assert!(!world.entity(child_b).contains::()); + } + + #[test] + fn replace_children_order() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + let child_c = world.spawn_empty().id(); + let child_d = world.spawn_empty().id(); + + let initial_order = [child_a, child_b, child_c, child_d]; + world.entity_mut(parent).add_children(&initial_order); + + assert_eq!( + world.entity_mut(parent).get::().unwrap().0, + initial_order + ); + + let new_order = [child_d, child_b, child_a, child_c]; + world.entity_mut(parent).replace_children(&new_order); + + assert_eq!(world.entity(parent).get::().unwrap().0, new_order); + } + + #[test] + #[should_panic] + #[cfg_attr( + not(debug_assertions), + ignore = "we don't check invariants if debug assertions are off" + )] + fn replace_diff_invariant_overlapping_unrelate_with_relate() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children_with_difference(&[], &[child_a], &[child_a]); + + // This should panic + world + .entity_mut(parent) + .replace_children_with_difference(&[child_a], &[child_a], &[]); + } + + #[test] + #[should_panic] + #[cfg_attr( + not(debug_assertions), + ignore = "we don't check invariants if debug assertions are off" + )] + fn replace_diff_invariant_overlapping_unrelate_with_newly() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children_with_difference(&[], &[child_a], &[child_a]); + + // This should panic + world.entity_mut(parent).replace_children_with_difference( + &[child_b], + &[child_a, child_b], + &[child_b], + ); + } + + #[test] + #[should_panic] + #[cfg_attr( + not(debug_assertions), + ignore = "we don't check invariants if debug assertions are off" + )] + fn replace_diff_invariant_newly_not_subset() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + + // This should panic + world.entity_mut(parent).replace_children_with_difference( + &[], + &[child_a, child_b], + &[child_a], + ); + } + + #[test] + 
fn child_replace_hook_skip() { + let mut world = World::new(); + let parent = world.spawn_empty().id(); + let other = world.spawn_empty().id(); + let child = world.spawn(ChildOf(parent)).id(); + world + .entity_mut(child) + .insert_with_relationship_hook_mode(ChildOf(other), RelationshipHookMode::Skip); + assert_eq!( + &**world.entity(parent).get::().unwrap(), + &[child], + "Children should still have the old value, as on_insert/on_replace didn't run" + ); + } } diff --git a/crates/bevy_ecs/src/identifier/mod.rs b/crates/bevy_ecs/src/identifier/mod.rs index 964f03ed3a..c08ea7b4aa 100644 --- a/crates/bevy_ecs/src/identifier/mod.rs +++ b/crates/bevy_ecs/src/identifier/mod.rs @@ -21,7 +21,7 @@ pub(crate) mod masks; #[derive(Debug, Clone, Copy)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", reflect(opaque))] -#[cfg_attr(feature = "bevy_reflect", reflect(Debug, Hash, PartialEq))] +#[cfg_attr(feature = "bevy_reflect", reflect(Debug, Hash, PartialEq, Clone))] // Alignment repr necessary to allow LLVM to better output // optimized codegen for `to_bits`, `PartialEq` and `Ord`. #[repr(C, align(8))] diff --git a/crates/bevy_ecs/src/intern.rs b/crates/bevy_ecs/src/intern.rs index 5639d5fbe3..b10e6a2ac6 100644 --- a/crates/bevy_ecs/src/intern.rs +++ b/crates/bevy_ecs/src/intern.rs @@ -5,7 +5,7 @@ //! and make comparisons for any type as fast as integers. use alloc::{borrow::ToOwned, boxed::Box}; -use bevy_platform_support::{ +use bevy_platform::{ collections::HashSet, hash::FixedHasher, sync::{PoisonError, RwLock}, @@ -170,7 +170,7 @@ impl Default for Interner { #[cfg(test)] mod tests { use alloc::{boxed::Box, string::ToString}; - use bevy_platform_support::hash::FixedHasher; + use bevy_platform::hash::FixedHasher; use core::hash::{BuildHasher, Hash, Hasher}; use crate::intern::{Internable, Interned, Interner}; diff --git a/crates/bevy_ecs/src/label.rs b/crates/bevy_ecs/src/label.rs index 10a23cfb89..c404c563bd 100644 --- a/crates/bevy_ecs/src/label.rs +++ b/crates/bevy_ecs/src/label.rs @@ -142,6 +142,7 @@ macro_rules! define_label { } } + #[diagnostic::do_not_recommend] impl $label_trait_name for $crate::intern::Interned { $($interned_extra_methods_impl)* @@ -180,7 +181,7 @@ macro_rules! define_label { impl $crate::intern::Internable for dyn $label_trait_name { fn leak(&self) -> &'static Self { - Box::leak(self.dyn_clone()) + $crate::label::Box::leak(self.dyn_clone()) } fn ref_eq(&self, other: &Self) -> bool { diff --git a/crates/bevy_ecs/src/lib.rs b/crates/bevy_ecs/src/lib.rs index 26b888f230..99f95763d5 100644 --- a/crates/bevy_ecs/src/lib.rs +++ b/crates/bevy_ecs/src/lib.rs @@ -2,13 +2,6 @@ unsafe_op_in_unsafe_fn, reason = "See #11590. To be removed once all applicable unsafe code has an unsafe block with a safety comment." )] -#![cfg_attr( - test, - expect( - dependency_on_unit_never_type_fallback, - reason = "See #17340. 
To be removed once Edition 2024 is released" - ) -)] #![doc = include_str!("../README.md")] #![cfg_attr( any(docsrs, docsrs_dep), @@ -43,12 +36,14 @@ pub mod change_detection; pub mod component; pub mod entity; pub mod entity_disabling; +pub mod error; pub mod event; pub mod hierarchy; pub mod identifier; pub mod intern; pub mod label; pub mod name; +pub mod never; pub mod observer; pub mod query; #[cfg(feature = "bevy_reflect")] @@ -56,7 +51,6 @@ pub mod reflect; pub mod relationship; pub mod removal_detection; pub mod resource; -pub mod result; pub mod schedule; pub mod spawn; pub mod storage; @@ -79,8 +73,9 @@ pub mod prelude { bundle::Bundle, change_detection::{DetectChanges, DetectChangesMut, Mut, Ref}, children, - component::{require, Component}, - entity::{Entity, EntityBorrow, EntityMapper}, + component::Component, + entity::{ContainsEntity, Entity, EntityMapper}, + error::{BevyError, Result}, event::{Event, EventMutator, EventReader, EventWriter, Events}, hierarchy::{ChildOf, ChildSpawner, ChildSpawnerCommands, Children}, name::{Name, NameOrEntity}, @@ -90,17 +85,16 @@ pub mod prelude { relationship::RelationshipTarget, removal_detection::RemovedComponents, resource::Resource, - result::{Error, Result}, schedule::{ - apply_deferred, common_conditions::*, ApplyDeferred, Condition, IntoSystemConfigs, - IntoSystemSet, IntoSystemSetConfigs, Schedule, Schedules, SystemSet, + apply_deferred, common_conditions::*, ApplyDeferred, Condition, IntoScheduleConfigs, + IntoSystemSet, Schedule, Schedules, SystemSet, }, spawn::{Spawn, SpawnRelated}, system::{ Command, Commands, Deferred, EntityCommand, EntityCommands, In, InMut, InRef, IntoSystem, Local, NonSend, NonSendMut, ParamSet, Populated, Query, ReadOnlySystem, Res, ResMut, Single, System, SystemIn, SystemInput, SystemParamBuilder, - SystemParamFunction, WithParamWarnPolicy, + SystemParamFunction, }, world::{ EntityMut, EntityRef, EntityWorldMut, FilteredResources, FilteredResourcesMut, @@ -139,8 +133,8 @@ mod tests { use crate::{ bundle::Bundle, change_detection::Ref, - component::{require, Component, ComponentId, RequiredComponents, RequiredComponentsError}, - entity::Entity, + component::{Component, ComponentId, RequiredComponents, RequiredComponentsError}, + entity::{Entity, EntityMapper}, entity_disabling::DefaultQueryFilters, prelude::Or, query::{Added, Changed, FilteredAccess, QueryFilter, With, Without}, @@ -153,8 +147,7 @@ mod tests { vec, vec::Vec, }; - use bevy_ecs_macros::{VisitEntities, VisitEntitiesMut}; - use bevy_platform_support::collections::HashSet; + use bevy_platform::collections::HashSet; use bevy_tasks::{ComputeTaskPool, TaskPool}; use core::{ any::TypeId, @@ -236,7 +229,7 @@ mod tests { y: SparseStored, } let mut ids = Vec::new(); - ::component_ids(&mut world.components, &mut |id| { + ::component_ids(&mut world.components_registrator(), &mut |id| { ids.push(id); }); @@ -286,7 +279,7 @@ mod tests { } let mut ids = Vec::new(); - ::component_ids(&mut world.components, &mut |id| { + ::component_ids(&mut world.components_registrator(), &mut |id| { ids.push(id); }); @@ -338,9 +331,12 @@ mod tests { } let mut ids = Vec::new(); - ::component_ids(&mut world.components, &mut |id| { - ids.push(id); - }); + ::component_ids( + &mut world.components_registrator(), + &mut |id| { + ids.push(id); + }, + ); assert_eq!(ids, &[world.register_component::(),]); @@ -1709,6 +1705,10 @@ mod tests { let values = vec![(e0, (B(0), C)), (e1, (B(1), C))]; + #[expect( + deprecated, + reason = "This needs to be supported for now, and therefore 
still needs the test." + )] world.insert_or_spawn_batch(values).unwrap(); assert_eq!( @@ -1749,6 +1749,10 @@ mod tests { let values = vec![(e0, (B(0), C)), (e1, (B(1), C)), (invalid_e2, (B(2), C))]; + #[expect( + deprecated, + reason = "This needs to be supported for now, and therefore still needs the test." + )] let result = world.insert_or_spawn_batch(values); assert_eq!( @@ -1923,7 +1927,7 @@ mod tests { struct X; #[derive(Component)] - #[require(Z(new_z))] + #[require(Z = new_z())] struct Y { value: String, } @@ -2642,6 +2646,37 @@ mod tests { assert_eq!(to_vec(required_z), vec![(b, 0), (c, 1)]); } + #[test] + fn required_components_inheritance_depth_bias() { + #[derive(Component, PartialEq, Eq, Clone, Copy, Debug)] + struct MyRequired(bool); + + #[derive(Component, Default)] + #[require(MyRequired(false))] + struct MiddleMan; + + #[derive(Component, Default)] + #[require(MiddleMan)] + struct ConflictingRequire; + + #[derive(Component, Default)] + #[require(MyRequired(true))] + struct MyComponent; + + let mut world = World::new(); + let order_a = world + .spawn((ConflictingRequire, MyComponent)) + .get::() + .cloned(); + let order_b = world + .spawn((MyComponent, ConflictingRequire)) + .get::() + .cloned(); + + assert_eq!(order_a, Some(MyRequired(true))); + assert_eq!(order_b, Some(MyRequired(true))); + } + #[test] #[should_panic = "Recursive required components detected: A → B → C → B\nhelp: If this is intentional, consider merging the components."] fn required_components_recursion_errors() { @@ -2670,8 +2705,19 @@ mod tests { World::new().register_component::(); } + #[derive(Default)] + struct CaptureMapper(Vec); + impl EntityMapper for CaptureMapper { + fn get_mapped(&mut self, source: Entity) -> Entity { + self.0.push(source); + source + } + + fn set_mapped(&mut self, _source: Entity, _target: Entity) {} + } + #[test] - fn visit_struct_entities() { + fn map_struct_entities() { #[derive(Component)] #[expect( unused, @@ -2698,30 +2744,22 @@ mod tests { let e3 = world.spawn_empty().id(); let mut foo = Foo(1, e1); - let mut entities = Vec::new(); - Component::visit_entities(&foo, |e| entities.push(e)); - assert_eq!(&entities, &[e1]); - - let mut entities = Vec::new(); - Component::visit_entities_mut(&mut foo, |e| entities.push(*e)); - assert_eq!(&entities, &[e1]); + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut foo, &mut mapper); + assert_eq!(&mapper.0, &[e1]); let mut bar = Bar { a: e1, b: 1, c: vec![e2, e3], }; - let mut entities = Vec::new(); - Component::visit_entities(&bar, |e| entities.push(e)); - assert_eq!(&entities, &[e1, e2, e3]); - - let mut entities = Vec::new(); - Component::visit_entities_mut(&mut bar, |e| entities.push(*e)); - assert_eq!(&entities, &[e1, e2, e3]); + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut bar, &mut mapper); + assert_eq!(&mapper.0, &[e1, e2, e3]); } #[test] - fn visit_enum_entities() { + fn map_enum_entities() { #[derive(Component)] #[expect( unused, @@ -2744,26 +2782,18 @@ mod tests { let e3 = world.spawn_empty().id(); let mut foo = Foo::Bar(1, e1); - let mut entities = Vec::new(); - Component::visit_entities(&foo, |e| entities.push(e)); - assert_eq!(&entities, &[e1]); - - let mut entities = Vec::new(); - Component::visit_entities_mut(&mut foo, |e| entities.push(*e)); - assert_eq!(&entities, &[e1]); + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut foo, &mut mapper); + assert_eq!(&mapper.0, &[e1]); let mut foo = Foo::Baz { a: e1, b: 1, c: vec![e2, e3], }; - let mut 
entities = Vec::new(); - Component::visit_entities(&foo, |e| entities.push(e)); - assert_eq!(&entities, &[e1, e2, e3]); - - let mut entities = Vec::new(); - Component::visit_entities_mut(&mut foo, |e| entities.push(*e)); - assert_eq!(&entities, &[e1, e2, e3]); + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut foo, &mut mapper); + assert_eq!(&mapper.0, &[e1, e2, e3]); } #[expect( @@ -2792,16 +2822,18 @@ mod tests { field1: ComponentB, } - #[derive(Component, VisitEntities, VisitEntitiesMut)] + #[derive(Component)] struct MyEntities { + #[entities] entities: Vec, + #[entities] another_one: Entity, + #[entities] maybe_entity: Option, #[expect( dead_code, reason = "This struct is used as a compilation test to test the derive macros, and as such this field is intentionally never used." )] - #[visit_entities(ignore)] something_else: String, } @@ -2809,6 +2841,6 @@ mod tests { dead_code, reason = "This struct is used as a compilation test to test the derive macros, and as such is intentionally never constructed." )] - #[derive(Component, VisitEntities, VisitEntitiesMut)] - struct MyEntitiesTuple(Vec, Entity, #[visit_entities(ignore)] usize); + #[derive(Component)] + struct MyEntitiesTuple(#[entities] Vec, #[entities] Entity, usize); } diff --git a/crates/bevy_ecs/src/name.rs b/crates/bevy_ecs/src/name.rs index 9c38b28481..dd34f5578a 100644 --- a/crates/bevy_ecs/src/name.rs +++ b/crates/bevy_ecs/src/name.rs @@ -6,7 +6,7 @@ use alloc::{ borrow::{Cow, ToOwned}, string::String, }; -use bevy_platform_support::hash::FixedHasher; +use bevy_platform::hash::FixedHasher; use core::{ hash::{BuildHasher, Hash, Hasher}, ops::Deref, @@ -41,7 +41,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Component, Default, Debug) + reflect(Component, Default, Debug, Clone, Hash, PartialEq) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_ecs/src/never.rs b/crates/bevy_ecs/src/never.rs new file mode 100644 index 0000000000..ba814c7006 --- /dev/null +++ b/crates/bevy_ecs/src/never.rs @@ -0,0 +1,39 @@ +//! A workaround for the `!` type in stable Rust. +//! +//! This approach is taken from the [`never_say_never`] crate, +//! reimplemented here to avoid adding a new dependency. +//! +//! This module exists due to a change in [never type fallback inference] in the Rust 2024 edition. +//! This caused failures in `bevy_ecs`'s traits which are implemented for functions +//! (like [`System`](crate::system::System)) when working with panicking closures. +//! +//! Note that using this hack is not recommended in general; +//! by doing so you are knowingly opting out of rustc's stability guarantees. +//! Code that compiles due to this hack may break in future versions of Rust. +//! +//! Please read [issue #18778](https://github.com/bevyengine/bevy/issues/18778) for an explanation of why +//! Bevy has chosen to use this workaround. +//! +//! [`never_say_never`]: https://crates.io/crates/never_say_never +//! [never type fallback inference]: https://doc.rust-lang.org/edition-guide/rust-2024/never-type-fallback.html + +mod fn_ret { + /// A helper trait for naming the ! type. + #[doc(hidden)] + pub trait FnRet { + /// The return type of the function. + type Output; + } + + /// This blanket implementation allows us to name the never type, + /// by using the associated type of this trait for `fn() -> !`. 
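For readers unfamiliar with the trick behind the new `never` module (whose blanket impl follows just below in the diff), here is a minimal, self-contained sketch of the same fn-pointer approach. It is not part of the diff; the `diverges` function is a hypothetical usage example, and the trait/alias names simply mirror the module added above.

```rust
// Sketch of the fn-pointer trick: a blanket impl over `fn() -> R` exposes the
// return type as an associated type, which lets stable Rust name `!`.
trait FnRet {
    type Output;
}

impl<R> FnRet for fn() -> R {
    type Output = R;
}

// Naming the never type through the impl for `fn() -> !`.
type Never = <fn() -> ! as FnRet>::Output;

// Hypothetical diverging function whose return type is spelled via the alias.
fn diverges() -> Never {
    panic!("this function never returns")
}

fn main() {
    // We only check that the types line up; calling `diverges()` would panic.
    let _as_fn_pointer: fn() -> Never = diverges;
}
```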
+ impl FnRet for fn() -> R { + type Output = R; + } +} + +/// A hacky type alias for the `!` (never) type. +/// +/// This knowingly opts out of rustc's stability guarantees. +/// Read the module documentation carefully before using this! +pub type Never = ! as fn_ret::FnRet>::Output; diff --git a/crates/bevy_ecs/src/observer/entity_observer.rs b/crates/bevy_ecs/src/observer/entity_observer.rs index 556ca0d8aa..d69f7764fe 100644 --- a/crates/bevy_ecs/src/observer/entity_observer.rs +++ b/crates/bevy_ecs/src/observer/entity_observer.rs @@ -2,9 +2,8 @@ use crate::{ component::{ Component, ComponentCloneBehavior, ComponentHook, HookContext, Mutable, StorageType, }, - entity::{ComponentCloneCtx, Entity, EntityClonerBuilder}, + entity::{ComponentCloneCtx, Entity, EntityClonerBuilder, EntityMapper, SourceComponent}, observer::ObserverState, - system::Commands, world::World, }; use alloc::vec::Vec; @@ -64,11 +63,11 @@ impl EntityClonerBuilder<'_> { } } -fn component_clone_observed_by(commands: &mut Commands, ctx: &mut ComponentCloneCtx) { +fn component_clone_observed_by(_source: &SourceComponent, ctx: &mut ComponentCloneCtx) { let target = ctx.target(); let source = ctx.source(); - commands.queue(move |world: &mut World| { + ctx.queue_deferred(move |world: &mut World, _mapper: &mut dyn EntityMapper| { let observed_by = world .get::(source) .map(|observed_by| observed_by.0.clone()) diff --git a/crates/bevy_ecs/src/observer/mod.rs b/crates/bevy_ecs/src/observer/mod.rs index 8c22ace911..78569bc4ec 100644 --- a/crates/bevy_ecs/src/observer/mod.rs +++ b/crates/bevy_ecs/src/observer/mod.rs @@ -5,17 +5,19 @@ mod runner; pub use entity_observer::ObservedBy; pub use runner::*; +use variadics_please::all_tuples; use crate::{ archetype::ArchetypeFlags, + change_detection::MaybeLocation, component::ComponentId, - entity::hash_map::EntityHashMap, + entity::EntityHashMap, prelude::*, system::IntoObserverSystem, world::{DeferredWorld, *}, }; use alloc::vec::Vec; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_ptr::Ptr; use core::{ fmt::Debug, @@ -24,9 +26,6 @@ use core::{ }; use smallvec::SmallVec; -#[cfg(feature = "track_location")] -use core::panic::Location; - /// Type containing triggered [`Event`] information for a given run of an [`Observer`]. This contains the /// [`Event`] data itself. If it was triggered for a specific [`Entity`], it includes that as well. It also /// contains event propagation information. See [`Trigger::propagate`] for more information. @@ -143,8 +142,7 @@ impl<'w, E, B: Bundle> Trigger<'w, E, B> { } /// Returns the source code location that triggered this observer. - #[cfg(feature = "track_location")] - pub fn caller(&self) -> &'static Location<'static> { + pub fn caller(&self) -> MaybeLocation { self.trigger.caller } } @@ -180,92 +178,108 @@ impl<'w, E, B: Bundle> DerefMut for Trigger<'w, E, B> { /// will run. pub trait TriggerTargets { /// The components the trigger should target. - fn components(&self) -> &[ComponentId]; + fn components(&self) -> impl Iterator + Clone + '_; /// The entities the trigger should target. 
- fn entities(&self) -> &[Entity]; + fn entities(&self) -> impl Iterator + Clone + '_; } -impl TriggerTargets for () { - fn components(&self) -> &[ComponentId] { - &[] +impl TriggerTargets for &T { + fn components(&self) -> impl Iterator + Clone + '_ { + (**self).components() } - fn entities(&self) -> &[Entity] { - &[] + fn entities(&self) -> impl Iterator + Clone + '_ { + (**self).entities() } } impl TriggerTargets for Entity { - fn components(&self) -> &[ComponentId] { - &[] + fn components(&self) -> impl Iterator + Clone + '_ { + [].into_iter() } - fn entities(&self) -> &[Entity] { - core::slice::from_ref(self) - } -} - -impl TriggerTargets for Vec { - fn components(&self) -> &[ComponentId] { - &[] - } - - fn entities(&self) -> &[Entity] { - self.as_slice() - } -} - -impl TriggerTargets for [Entity; N] { - fn components(&self) -> &[ComponentId] { - &[] - } - - fn entities(&self) -> &[Entity] { - self.as_slice() + fn entities(&self) -> impl Iterator + Clone + '_ { + core::iter::once(*self) } } impl TriggerTargets for ComponentId { - fn components(&self) -> &[ComponentId] { - core::slice::from_ref(self) + fn components(&self) -> impl Iterator + Clone + '_ { + core::iter::once(*self) } - fn entities(&self) -> &[Entity] { - &[] + fn entities(&self) -> impl Iterator + Clone + '_ { + [].into_iter() } } -impl TriggerTargets for Vec { - fn components(&self) -> &[ComponentId] { - self.as_slice() +impl TriggerTargets for Vec { + fn components(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::components) } - fn entities(&self) -> &[Entity] { - &[] + fn entities(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::entities) } } -impl TriggerTargets for [ComponentId; N] { - fn components(&self) -> &[ComponentId] { - self.as_slice() +impl TriggerTargets for [T; N] { + fn components(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::components) } - fn entities(&self) -> &[Entity] { - &[] + fn entities(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::entities) } } -impl TriggerTargets for &Vec { - fn components(&self) -> &[ComponentId] { - &[] +impl TriggerTargets for [T] { + fn components(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::components) } - fn entities(&self) -> &[Entity] { - self.as_slice() + fn entities(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::entities) } } +macro_rules! impl_trigger_targets_tuples { + ($(#[$meta:meta])* $($trigger_targets: ident),*) => { + #[expect(clippy::allow_attributes, reason = "can't guarantee violation of non_snake_case")] + #[allow(non_snake_case, reason = "`all_tuples!()` generates non-snake-case variable names.")] + $(#[$meta])* + impl<$($trigger_targets: TriggerTargets),*> TriggerTargets for ($($trigger_targets,)*) + { + fn components(&self) -> impl Iterator + Clone + '_ { + let iter = [].into_iter(); + let ($($trigger_targets,)*) = self; + $( + let iter = iter.chain($trigger_targets.components()); + )* + iter + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + let iter = [].into_iter(); + let ($($trigger_targets,)*) = self; + $( + let iter = iter.chain($trigger_targets.entities()); + )* + iter + } + } + } +} + +all_tuples!( + #[doc(fake_variadic)] + impl_trigger_targets_tuples, + 0, + 15, + T +); + /// A description of what an [`Observer`] observes. #[derive(Default, Clone)] pub struct ObserverDescriptor { @@ -335,10 +349,8 @@ pub struct ObserverTrigger { components: SmallVec<[ComponentId; 2]>, /// The entity the trigger targeted. 
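As a usage-oriented aside before the `ObserverTrigger` struct below, and not part of the diff: with the rework above, `TriggerTargets` is expressed in terms of iterators rather than slices, so a custom target collection becomes straightforward. The sketch assumes the `Item` types implied by the surrounding impls (the extracted diff drops simple generic arguments), and `Selection` is a hypothetical type.

```rust
use bevy_ecs::component::ComponentId;
use bevy_ecs::observer::TriggerTargets;
use bevy_ecs::prelude::*;

// Hypothetical mixed target collection: some entities plus some component ids.
struct Selection {
    entities: Vec<Entity>,
    components: Vec<ComponentId>,
}

impl TriggerTargets for Selection {
    fn components(&self) -> impl Iterator<Item = ComponentId> + Clone + '_ {
        self.components.iter().copied()
    }

    fn entities(&self) -> impl Iterator<Item = Entity> + Clone + '_ {
        self.entities.iter().copied()
    }
}

// A `Selection` can then be passed wherever targets are expected,
// e.g. `world.trigger_targets(MyEvent, selection)`.
```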
pub target: Entity, - /// The location of the source code that triggered the obserer. - #[cfg(feature = "track_location")] - pub caller: &'static Location<'static>, + pub caller: MaybeLocation, } impl ObserverTrigger { @@ -415,7 +427,7 @@ impl Observers { components: impl Iterator + Clone, data: &mut T, propagate: &mut bool, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { // SAFETY: You cannot get a mutable reference to `observers` from `DeferredWorld` let (mut world, observers) = unsafe { @@ -440,7 +452,6 @@ impl Observers { event_type, components: components.clone().collect(), target, - #[cfg(feature = "track_location")] caller, }, data.into(), @@ -565,28 +576,14 @@ impl World { /// If you need to use the event after triggering it, use [`World::trigger_ref`] instead. #[track_caller] pub fn trigger(&mut self, event: E) { - self.trigger_with_caller( - event, - #[cfg(feature = "track_location")] - Location::caller(), - ); + self.trigger_with_caller(event, MaybeLocation::caller()); } - pub(crate) fn trigger_with_caller( - &mut self, - mut event: E, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, - ) { + pub(crate) fn trigger_with_caller(&mut self, mut event: E, caller: MaybeLocation) { let event_id = E::register_component_id(self); // SAFETY: We just registered `event_id` with the type of `event` unsafe { - self.trigger_targets_dynamic_ref_with_caller( - event_id, - &mut event, - (), - #[cfg(feature = "track_location")] - caller, - ); + self.trigger_targets_dynamic_ref_with_caller(event_id, &mut event, (), caller); } } @@ -608,30 +605,19 @@ impl World { /// If you need to use the event after triggering it, use [`World::trigger_targets_ref`] instead. #[track_caller] pub fn trigger_targets(&mut self, event: E, targets: impl TriggerTargets) { - self.trigger_targets_with_caller( - event, - targets, - #[cfg(feature = "track_location")] - Location::caller(), - ); + self.trigger_targets_with_caller(event, targets, MaybeLocation::caller()); } pub(crate) fn trigger_targets_with_caller( &mut self, mut event: E, targets: impl TriggerTargets, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { let event_id = E::register_component_id(self); // SAFETY: We just registered `event_id` with the type of `event` unsafe { - self.trigger_targets_dynamic_ref_with_caller( - event_id, - &mut event, - targets, - #[cfg(feature = "track_location")] - caller, - ); + self.trigger_targets_dynamic_ref_with_caller(event_id, &mut event, targets, caller); } } @@ -689,8 +675,7 @@ impl World { event_id, event_data, targets, - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), ); } @@ -702,10 +687,11 @@ impl World { event_id: ComponentId, event_data: &mut E, targets: Targets, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { let mut world = DeferredWorld::from(self); - if targets.entities().is_empty() { + let mut entity_targets = targets.entities().peekable(); + if entity_targets.peek().is_none() { // SAFETY: `event_data` is accessible as the type represented by `event_id` unsafe { world.trigger_observers_with_data::<_, E::Traversal>( @@ -714,21 +700,19 @@ impl World { targets.components(), event_data, false, - #[cfg(feature = "track_location")] caller, ); }; } else { - for target in targets.entities() { + for target_entity in entity_targets { // SAFETY: `event_data` is accessible as the type represented by `event_id` 
unsafe { world.trigger_observers_with_data::<_, E::Traversal>( event_id, - *target, + target_entity, targets.components(), event_data, E::AUTO_PROPAGATE, - #[cfg(feature = "track_location")] caller, ); }; @@ -858,14 +842,13 @@ impl World { #[cfg(test)] mod tests { use alloc::{vec, vec::Vec}; - #[cfg(feature = "track_location")] - use core::panic::Location; - use bevy_platform_support::collections::HashMap; + use bevy_platform::collections::HashMap; use bevy_ptr::OwningPtr; use crate::component::ComponentId; use crate::{ + change_detection::MaybeLocation, observer::{Observer, ObserverDescriptor, ObserverState, OnReplace}, prelude::*, traversal::Traversal, @@ -911,15 +894,10 @@ mod tests { } } - #[derive(Component)] + #[derive(Component, Event)] + #[event(traversal = &'static ChildOf, auto_propagate)] struct EventPropagating; - impl Event for EventPropagating { - type Traversal = &'static ChildOf; - - const AUTO_PROPAGATE: bool = true; - } - #[test] fn observer_order_spawn_despawn() { let mut world = World::new(); @@ -1150,11 +1128,10 @@ mod tests { fn observer_despawn() { let mut world = World::new(); - let observer = world - .add_observer(|_: Trigger| { - panic!("Observer triggered after being despawned.") - }) - .id(); + let system: fn(Trigger) = |_| { + panic!("Observer triggered after being despawned."); + }; + let observer = world.add_observer(system).id(); world.despawn(observer); world.spawn(A).flush(); } @@ -1171,11 +1148,11 @@ mod tests { res.observed("remove_a"); }); - let observer = world - .add_observer(|_: Trigger| { - panic!("Observer triggered after being despawned.") - }) - .flush(); + let system: fn(Trigger) = |_: Trigger| { + panic!("Observer triggered after being despawned."); + }; + + let observer = world.add_observer(system).flush(); world.despawn(observer); world.despawn(entity); @@ -1201,9 +1178,10 @@ mod tests { let mut world = World::new(); world.init_resource::(); - world - .spawn_empty() - .observe(|_: Trigger| panic!("Trigger routed to non-targeted entity.")); + let system: fn(Trigger) = |_| { + panic!("Trigger routed to non-targeted entity."); + }; + world.spawn_empty().observe(system); world.add_observer(move |obs: Trigger, mut res: ResMut| { assert_eq!(obs.target(), Entity::PLACEHOLDER); res.observed("event_a"); @@ -1222,9 +1200,11 @@ mod tests { let mut world = World::new(); world.init_resource::(); - world - .spawn_empty() - .observe(|_: Trigger| panic!("Trigger routed to non-targeted entity.")); + let system: fn(Trigger) = |_| { + panic!("Trigger routed to non-targeted entity."); + }; + + world.spawn_empty().observe(system); let entity = world .spawn_empty() .observe(|_: Trigger, mut res: ResMut| res.observed("a_1")) @@ -1242,6 +1222,119 @@ mod tests { assert_eq!(vec!["a_2", "a_1"], world.resource::().0); } + #[test] + fn observer_multiple_targets() { + #[derive(Resource, Default)] + struct R(i32); + + let mut world = World::new(); + let component_a = world.register_component::(); + let component_b = world.register_component::(); + world.init_resource::(); + + // targets (entity_1, A) + let entity_1 = world + .spawn_empty() + .observe(|_: Trigger, mut res: ResMut| res.0 += 1) + .id(); + // targets (entity_2, B) + let entity_2 = world + .spawn_empty() + .observe(|_: Trigger, mut res: ResMut| res.0 += 10) + .id(); + // targets any entity or component + world.add_observer(|_: Trigger, mut res: ResMut| res.0 += 100); + // targets any entity, and components A or B + world.add_observer(|_: Trigger, mut res: ResMut| res.0 += 1000); + // test all tuples + 
world.add_observer(|_: Trigger, mut res: ResMut| res.0 += 10000); + world.add_observer( + |_: Trigger, mut res: ResMut| { + res.0 += 100000; + }, + ); + world.add_observer( + |_: Trigger, + mut res: ResMut| res.0 += 1000000, + ); + + // WorldEntityMut does not automatically flush. + world.flush(); + + // trigger for an entity and a component + world.trigger_targets(EventA, (entity_1, component_a)); + world.flush(); + // only observer that doesn't trigger is the one only watching entity_2 + assert_eq!(1111101, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger for both entities, but no components: trigger once per entity target + world.trigger_targets(EventA, (entity_1, entity_2)); + world.flush(); + // only the observer that doesn't require components triggers - once per entity + assert_eq!(200, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger for both components, but no entities: trigger once + world.trigger_targets(EventA, (component_a, component_b)); + world.flush(); + // all component observers trigger, entities are not observed + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger for both entities and both components: trigger once per entity target + // we only get 2222211 because a given observer can trigger only once per entity target + world.trigger_targets(EventA, ((component_a, component_b), (entity_1, entity_2))); + world.flush(); + assert_eq!(2222211, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger to test complex tuples: (A, B, (A, B)) + world.trigger_targets( + EventA, + (component_a, component_b, (component_a, component_b)), + ); + world.flush(); + // the duplicate components in the tuple don't cause multiple triggers + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger to test complex tuples: (A, B, (A, B), ((A, B), (A, B))) + world.trigger_targets( + EventA, + ( + component_a, + component_b, + (component_a, component_b), + ((component_a, component_b), (component_a, component_b)), + ), + ); + world.flush(); + // the duplicate components in the tuple don't cause multiple triggers + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger to test the most complex tuple: (A, B, (A, B), (B, A), (A, B, ((A, B), (B, A)))) + world.trigger_targets( + EventA, + ( + component_a, + component_b, + (component_a, component_b), + (component_b, component_a), + ( + component_a, + component_b, + ((component_a, component_b), (component_b, component_a)), + ), + ), + ); + world.flush(); + // the duplicate components in the tuple don't cause multiple triggers + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + } + #[test] fn observer_dynamic_component() { let mut world = World::new(); @@ -1555,6 +1648,23 @@ mod tests { assert_eq!(vec!["event", "event"], world.resource::().0); } + // Originally for https://github.com/bevyengine/bevy/issues/18452 + #[test] + fn observer_modifies_relationship() { + fn on_add(trigger: Trigger, mut commands: Commands) { + commands + .entity(trigger.target()) + .with_related_entities::(|rsc| { + rsc.spawn_empty(); + }); + } + + let mut world = World::new(); + world.add_observer(on_add); + world.spawn(A); + world.flush(); + } + // Regression test for https://github.com/bevyengine/bevy/issues/14467 // Fails prior to https://github.com/bevyengine/bevy/pull/15398 #[test] @@ -1615,13 +1725,12 @@ mod tests { } #[test] - #[cfg(feature = "track_location")] #[track_caller] fn 
observer_caller_location_event() { #[derive(Event)] struct EventA; - let caller = Location::caller(); + let caller = MaybeLocation::caller(); let mut world = World::new(); world.add_observer(move |trigger: Trigger| { assert_eq!(trigger.caller(), caller); @@ -1630,13 +1739,12 @@ mod tests { } #[test] - #[cfg(feature = "track_location")] #[track_caller] fn observer_caller_location_command_archetype_move() { #[derive(Component)] struct Component; - let caller = Location::caller(); + let caller = MaybeLocation::caller(); let mut world = World::new(); world.add_observer(move |trigger: Trigger| { assert_eq!(trigger.caller(), caller); diff --git a/crates/bevy_ecs/src/observer/runner.rs b/crates/bevy_ecs/src/observer/runner.rs index 74b6519579..d68c495dab 100644 --- a/crates/bevy_ecs/src/observer/runner.rs +++ b/crates/bevy_ecs/src/observer/runner.rs @@ -3,6 +3,7 @@ use core::any::Any; use crate::{ component::{ComponentHook, ComponentId, HookContext, Mutable, StorageType}, + error::{default_error_handler, ErrorContext}, observer::{ObserverDescriptor, ObserverTrigger}, prelude::*, query::DebugCheckedUnwrap, @@ -272,6 +273,7 @@ pub struct Observer { system: Box, descriptor: ObserverDescriptor, hook_on_add: ComponentHook, + error_handler: Option, } impl Observer { @@ -282,6 +284,7 @@ impl Observer { system: Box::new(IntoObserverSystem::into_system(system)), descriptor: Default::default(), hook_on_add: hook_on_add::, + error_handler: None, } } @@ -316,6 +319,14 @@ impl Observer { self } + /// Set the error handler to use for this observer. + /// + /// See the [`error` module-level documentation](crate::error) for more information. + pub fn with_error_handler(mut self, error_handler: fn(BevyError, ErrorContext)) -> Self { + self.error_handler = Some(error_handler); + self + } + /// Returns the [`ObserverDescriptor`] for this [`Observer`]. pub fn descriptor(&self) -> &ObserverDescriptor { &self.descriptor @@ -363,6 +374,15 @@ fn observer_system_runner>( } state.last_trigger_id = last_trigger; + // SAFETY: Observer was triggered so must have an `Observer` component. 
+ let error_handler = unsafe { + observer_cell + .get::() + .debug_checked_unwrap() + .error_handler + .debug_checked_unwrap() + }; + let trigger: Trigger = Trigger::new( // SAFETY: Caller ensures `ptr` is castable to `&mut T` unsafe { ptr.deref_mut() }, @@ -382,12 +402,34 @@ fn observer_system_runner>( // - `update_archetype_component_access` is called first // - there are no outstanding references to world except a private component // - system is an `ObserverSystem` so won't mutate world beyond the access of a `DeferredWorld` + // and is never exclusive // - system is the same type erased system from above unsafe { (*system).update_archetype_component_access(world); - if (*system).validate_param_unsafe(world) { - (*system).run_unsafe(trigger, world); - (*system).queue_deferred(world.into_deferred()); + match (*system).validate_param_unsafe(world) { + Ok(()) => { + if let Err(err) = (*system).run_unsafe(trigger, world) { + error_handler( + err, + ErrorContext::Observer { + name: (*system).name(), + last_run: (*system).get_last_run(), + }, + ); + }; + (*system).queue_deferred(world.into_deferred()); + } + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::Observer { + name: (*system).name(), + last_run: (*system).get_last_run(), + }, + ); + } + } } } } @@ -407,7 +449,7 @@ fn hook_on_add>( world.commands().queue(move |world: &mut World| { let event_id = E::register_component_id(world); let mut components = Vec::new(); - B::component_ids(&mut world.components, &mut |id| { + B::component_ids(&mut world.components_registrator(), &mut |id| { components.push(id); }); let mut descriptor = ObserverDescriptor { @@ -416,10 +458,15 @@ fn hook_on_add>( ..Default::default() }; + let error_handler = default_error_handler(); + // Initialize System let system: *mut dyn ObserverSystem = if let Some(mut observe) = world.get_mut::(entity) { descriptor.merge(&observe.descriptor); + if observe.error_handler.is_none() { + observe.error_handler = Some(error_handler); + } let system = observe.system.downcast_mut::().unwrap(); &mut *system } else { @@ -442,3 +489,44 @@ fn hook_on_add>( } }); } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{event::Event, observer::Trigger}; + + #[derive(Event)] + struct TriggerEvent; + + #[test] + #[should_panic(expected = "I failed!")] + fn test_fallible_observer() { + fn system(_: Trigger) -> Result { + Err("I failed!".into()) + } + + let mut world = World::default(); + world.add_observer(system); + Schedule::default().run(&mut world); + world.trigger(TriggerEvent); + } + + #[test] + fn test_fallible_observer_ignored_errors() { + #[derive(Resource, Default)] + struct Ran(bool); + + fn system(_: Trigger, mut ran: ResMut) -> Result { + ran.0 = true; + Err("I failed!".into()) + } + + let mut world = World::default(); + world.init_resource::(); + let observer = Observer::new(system).with_error_handler(crate::error::ignore); + world.spawn(observer); + Schedule::default().run(&mut world); + world.trigger(TriggerEvent); + assert!(world.resource::().0); + } +} diff --git a/crates/bevy_ecs/src/query/access.rs b/crates/bevy_ecs/src/query/access.rs index 089d6914c6..01e3713ad6 100644 --- a/crates/bevy_ecs/src/query/access.rs +++ b/crates/bevy_ecs/src/query/access.rs @@ -3,9 +3,10 @@ use crate::storage::SparseSetIndex; use crate::world::World; use alloc::{format, string::String, vec, vec::Vec}; use core::{fmt, fmt::Debug, marker::PhantomData}; -use derive_more::derive::From; +use derive_more::From; use disqualified::ShortName; use 
fixedbitset::FixedBitSet; +use thiserror::Error; /// A wrapper struct to make Debug representations of [`FixedBitSet`] easier /// to read, when used to store [`SparseSetIndex`]. @@ -773,38 +774,99 @@ impl Access { self.archetypal.ones().map(T::get_sparse_set_index) } - /// Returns an iterator over the component IDs that this `Access` either - /// reads and writes or can't read or write. + /// Returns an iterator over the component IDs and their [`ComponentAccessKind`]. /// - /// The returned flag specifies whether the list consists of the components - /// that the access *can* read or write (false) or whether the list consists - /// of the components that the access *can't* read or write (true). + /// Returns `Err(UnboundedAccess)` if the access is unbounded. + /// This typically occurs when an [`Access`] is marked as accessing all + /// components, and then adding exceptions. /// - /// Because this method depends on internal implementation details of - /// `Access`, it's not recommended. Prefer to manage your own lists of - /// accessible components if your application needs to do that. - #[doc(hidden)] - // TODO: this should be deprecated and removed, see https://github.com/bevyengine/bevy/issues/16339 - pub fn component_reads_and_writes(&self) -> (impl Iterator + '_, bool) { - ( - self.component_read_and_writes - .ones() - .map(T::get_sparse_set_index), - self.component_read_and_writes_inverted, - ) - } + /// # Examples + /// + /// ```rust + /// # use bevy_ecs::query::{Access, ComponentAccessKind}; + /// let mut access = Access::::default(); + /// + /// access.add_component_read(1); + /// access.add_component_write(2); + /// access.add_archetypal(3); + /// + /// let result = access + /// .try_iter_component_access() + /// .map(Iterator::collect::>); + /// + /// assert_eq!( + /// result, + /// Ok(vec![ + /// ComponentAccessKind::Shared(1), + /// ComponentAccessKind::Exclusive(2), + /// ComponentAccessKind::Archetypal(3), + /// ]), + /// ); + /// ``` + pub fn try_iter_component_access( + &self, + ) -> Result> + '_, UnboundedAccessError> { + // component_writes_inverted is only ever true when component_read_and_writes_inverted is + // also true. Therefore it is sufficient to check just component_read_and_writes_inverted. + if self.component_read_and_writes_inverted { + return Err(UnboundedAccessError { + writes_inverted: self.component_writes_inverted, + read_and_writes_inverted: self.component_read_and_writes_inverted, + }); + } - /// Returns an iterator over the component IDs that this `Access` either - /// writes or can't write. - /// - /// The returned flag specifies whether the list consists of the components - /// that the access *can* write (false) or whether the list consists of the - /// components that the access *can't* write (true). 
- pub(crate) fn component_writes(&self) -> (impl Iterator + '_, bool) { - ( - self.component_writes.ones().map(T::get_sparse_set_index), - self.component_writes_inverted, - ) + let reads_and_writes = self.component_read_and_writes.ones().map(|index| { + let sparse_index = T::get_sparse_set_index(index); + + if self.component_writes.contains(index) { + ComponentAccessKind::Exclusive(sparse_index) + } else { + ComponentAccessKind::Shared(sparse_index) + } + }); + + let archetypal = self + .archetypal + .ones() + .filter(|&index| { + !self.component_writes.contains(index) + && !self.component_read_and_writes.contains(index) + }) + .map(|index| ComponentAccessKind::Archetypal(T::get_sparse_set_index(index))); + + Ok(reads_and_writes.chain(archetypal)) + } +} + +/// Error returned when attempting to iterate over items included in an [`Access`] +/// if the access excludes items rather than including them. +#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] +#[error("Access is unbounded")] +pub struct UnboundedAccessError { + /// [`Access`] is defined in terms of _excluding_ [exclusive](ComponentAccessKind::Exclusive) + /// access. + pub writes_inverted: bool, + /// [`Access`] is defined in terms of _excluding_ [shared](ComponentAccessKind::Shared) and + /// [exclusive](ComponentAccessKind::Exclusive) access. + pub read_and_writes_inverted: bool, +} + +/// Describes the level of access for a particular component as defined in an [`Access`]. +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy)] +pub enum ComponentAccessKind { + /// Archetypical access, such as `Has`. + Archetypal(T), + /// Shared access, such as `&Foo`. + Shared(T), + /// Exclusive access, such as `&mut Foo`. + Exclusive(T), +} + +impl ComponentAccessKind { + /// Gets the index of this `ComponentAccessKind`. + pub fn index(&self) -> &T { + let (Self::Archetypal(value) | Self::Shared(value) | Self::Exclusive(value)) = self; + value } } @@ -819,7 +881,7 @@ impl Access { /// otherwise would allow for queries to be considered disjoint when they shouldn't: /// - `Query<(&mut T, Option<&U>)>` read/write `T`, read `U`, with `U` /// - `Query<&mut T, Without>` read/write `T`, without `U` -/// from this we could reasonably conclude that the queries are disjoint but they aren't. +/// from this we could reasonably conclude that the queries are disjoint but they aren't. /// /// In order to solve this the actual access that `Query<(&mut T, Option<&U>)>` has /// is read/write `T`, read `U`. 
It must still have a read `U` access otherwise the following @@ -906,11 +968,10 @@ impl AccessConflicts { format!( "{}", ShortName( - world + &world .components - .get_info(ComponentId::get_sparse_set_index(index)) + .get_name(ComponentId::get_sparse_set_index(index)) .unwrap() - .name() ) ) }) @@ -1360,9 +1421,10 @@ impl Default for FilteredAccessSet { #[cfg(test)] mod tests { use crate::query::{ - access::AccessFilters, Access, AccessConflicts, FilteredAccess, FilteredAccessSet, + access::AccessFilters, Access, AccessConflicts, ComponentAccessKind, FilteredAccess, + FilteredAccessSet, UnboundedAccessError, }; - use alloc::vec; + use alloc::{vec, vec::Vec}; use core::marker::PhantomData; use fixedbitset::FixedBitSet; @@ -1634,4 +1696,70 @@ mod tests { assert_eq!(access_a, expected); } + + #[test] + fn try_iter_component_access_simple() { + let mut access = Access::::default(); + + access.add_component_read(1); + access.add_component_read(2); + access.add_component_write(3); + access.add_archetypal(5); + + let result = access + .try_iter_component_access() + .map(Iterator::collect::>); + + assert_eq!( + result, + Ok(vec![ + ComponentAccessKind::Shared(1), + ComponentAccessKind::Shared(2), + ComponentAccessKind::Exclusive(3), + ComponentAccessKind::Archetypal(5), + ]), + ); + } + + #[test] + fn try_iter_component_access_unbounded_write_all() { + let mut access = Access::::default(); + + access.add_component_read(1); + access.add_component_read(2); + access.write_all(); + + let result = access + .try_iter_component_access() + .map(Iterator::collect::>); + + assert_eq!( + result, + Err(UnboundedAccessError { + writes_inverted: true, + read_and_writes_inverted: true + }), + ); + } + + #[test] + fn try_iter_component_access_unbounded_read_all() { + let mut access = Access::::default(); + + access.add_component_read(1); + access.add_component_read(2); + access.read_all(); + + let result = access + .try_iter_component_access() + .map(Iterator::collect::>); + + assert_eq!( + result, + Err(UnboundedAccessError { + writes_inverted: false, + read_and_writes_inverted: true + }), + ); + } } diff --git a/crates/bevy_ecs/src/query/builder.rs b/crates/bevy_ecs/src/query/builder.rs index 65ad02111c..81819cb9ac 100644 --- a/crates/bevy_ecs/src/query/builder.rs +++ b/crates/bevy_ecs/src/query/builder.rs @@ -33,7 +33,7 @@ use super::{FilteredAccess, QueryData, QueryFilter}; /// .build(); /// /// // Consume the QueryState -/// let (entity, b) = query.single(&world); +/// let (entity, b) = query.single(&world).unwrap(); /// ``` pub struct QueryBuilder<'w, D: QueryData = (), F: QueryFilter = ()> { access: FilteredAccess, @@ -81,14 +81,14 @@ impl<'w, D: QueryData, F: QueryFilter> QueryBuilder<'w, D, F> { .is_some_and(|info| info.storage_type() == StorageType::Table) }; - let (mut component_reads_and_writes, component_reads_and_writes_inverted) = - self.access.access().component_reads_and_writes(); - if component_reads_and_writes_inverted { + let Ok(component_accesses) = self.access.access().try_iter_component_access() else { + // Access is unbounded, pessimistically assume it's sparse. 
return false; - } + }; - component_reads_and_writes.all(is_dense) - && self.access.access().archetypal().all(is_dense) + component_accesses + .map(|access| *access.index()) + .all(is_dense) && !self.access.access().has_read_all_components() && self.access.with_filters().all(is_dense) && self.access.without_filters().all(is_dense) @@ -297,13 +297,13 @@ mod tests { .with::() .without::() .build(); - assert_eq!(entity_a, query_a.single(&world)); + assert_eq!(entity_a, query_a.single(&world).unwrap()); let mut query_b = QueryBuilder::::new(&mut world) .with::() .without::() .build(); - assert_eq!(entity_b, query_b.single(&world)); + assert_eq!(entity_b, query_b.single(&world).unwrap()); } #[test] @@ -319,13 +319,13 @@ mod tests { .with_id(component_id_a) .without_id(component_id_c) .build(); - assert_eq!(entity_a, query_a.single(&world)); + assert_eq!(entity_a, query_a.single(&world).unwrap()); let mut query_b = QueryBuilder::::new(&mut world) .with_id(component_id_a) .without_id(component_id_b) .build(); - assert_eq!(entity_b, query_b.single(&world)); + assert_eq!(entity_b, query_b.single(&world).unwrap()); } #[test] @@ -385,7 +385,7 @@ mod tests { .data::<&B>() .build(); - let entity_ref = query.single(&world); + let entity_ref = query.single(&world).unwrap(); assert_eq!(entity, entity_ref.id()); @@ -408,7 +408,7 @@ mod tests { .ref_id(component_id_b) .build(); - let entity_ref = query.single(&world); + let entity_ref = query.single(&world).unwrap(); assert_eq!(entity, entity_ref.id()); diff --git a/crates/bevy_ecs/src/query/error.rs b/crates/bevy_ecs/src/query/error.rs index 054da37a75..6d0b149b86 100644 --- a/crates/bevy_ecs/src/query/error.rs +++ b/crates/bevy_ecs/src/query/error.rs @@ -1,40 +1,42 @@ use thiserror::Error; use crate::{ - entity::{Entity, EntityDoesNotExistDetails}, - world::unsafe_world_cell::UnsafeWorldCell, + archetype::ArchetypeId, + entity::{Entity, EntityDoesNotExistError}, }; /// An error that occurs when retrieving a specific [`Entity`]'s query result from [`Query`](crate::system::Query) or [`QueryState`](crate::query::QueryState). // TODO: return the type_name as part of this error -#[derive(Clone, Copy)] -pub enum QueryEntityError<'w> { +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum QueryEntityError { /// The given [`Entity`]'s components do not match the query. /// /// Either it does not have a requested component, or it has a component which the query filters out. - QueryDoesNotMatch(Entity, UnsafeWorldCell<'w>), + QueryDoesNotMatch(Entity, ArchetypeId), /// The given [`Entity`] does not exist. - NoSuchEntity(Entity, EntityDoesNotExistDetails), + EntityDoesNotExist(EntityDoesNotExistError), /// The [`Entity`] was requested mutably more than once. /// - /// See [`QueryState::get_many_mut`](crate::query::QueryState::get_many_mut) for an example. + /// See [`Query::get_many_mut`](crate::system::Query::get_many_mut) for an example. 
AliasedMutability(Entity), } -impl<'w> core::error::Error for QueryEntityError<'w> {} +impl From for QueryEntityError { + fn from(error: EntityDoesNotExistError) -> Self { + QueryEntityError::EntityDoesNotExist(error) + } +} -impl<'w> core::fmt::Display for QueryEntityError<'w> { +impl core::error::Error for QueryEntityError {} + +impl core::fmt::Display for QueryEntityError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match *self { - Self::QueryDoesNotMatch(entity, world) => { - write!( - f, - "The query does not match entity {entity}, which has components " - )?; - format_archetype(f, world, entity) + Self::QueryDoesNotMatch(entity, _) => { + write!(f, "The query does not match entity {entity}") } - Self::NoSuchEntity(entity, details) => { - write!(f, "The entity with ID {entity} {details}") + Self::EntityDoesNotExist(error) => { + write!(f, "{error}") } Self::AliasedMutability(entity) => { write!( @@ -46,59 +48,8 @@ impl<'w> core::fmt::Display for QueryEntityError<'w> { } } -impl<'w> core::fmt::Debug for QueryEntityError<'w> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - Self::QueryDoesNotMatch(entity, world) => { - write!(f, "QueryDoesNotMatch({entity} with components ")?; - format_archetype(f, world, entity)?; - write!(f, ")") - } - Self::NoSuchEntity(entity, details) => { - write!(f, "NoSuchEntity({entity} {details})") - } - Self::AliasedMutability(entity) => write!(f, "AliasedMutability({entity})"), - } - } -} - -fn format_archetype( - f: &mut core::fmt::Formatter<'_>, - world: UnsafeWorldCell<'_>, - entity: Entity, -) -> core::fmt::Result { - // We know entity is still alive - let entity = world - .get_entity(entity) - .expect("entity does not belong to world"); - for (i, component_id) in entity.archetype().components().enumerate() { - if i > 0 { - write!(f, ", ")?; - } - let name = world - .components() - .get_name(component_id) - .expect("entity does not belong to world"); - write!(f, "{}", disqualified::ShortName(name))?; - } - Ok(()) -} - -impl<'w> PartialEq for QueryEntityError<'w> { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::QueryDoesNotMatch(e1, _), Self::QueryDoesNotMatch(e2, _)) if e1 == e2 => true, - (Self::NoSuchEntity(e1, _), Self::NoSuchEntity(e2, _)) if e1 == e2 => true, - (Self::AliasedMutability(e1), Self::AliasedMutability(e2)) if e1 == e2 => true, - _ => false, - } - } -} - -impl<'w> Eq for QueryEntityError<'w> {} - /// An error that occurs when evaluating a [`Query`](crate::system::Query) or [`QueryState`](crate::query::QueryState) as a single expected result via -/// [`get_single`](crate::system::Query::get_single) or [`get_single_mut`](crate::system::Query::get_single_mut). +/// [`single`](crate::system::Query::single) or [`single_mut`](crate::system::Query::single_mut). #[derive(Debug, Error)] pub enum QuerySingleError { /// No entity fits the query. 
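Since the call sites above move from the panicking accessors to the fallible `single`/`get` family (note the `.unwrap()` calls added in the builder tests), here is a small sketch of handling the error types in a system, assuming the post-rename signatures shown in this diff; `Player` and `report_player` are hypothetical names.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::query::QuerySingleError;

#[derive(Component)]
struct Player;

// Hypothetical system: branch on the `Result` instead of panicking.
fn report_player(query: Query<Entity, With<Player>>) {
    match query.single() {
        Ok(entity) => println!("the player is {entity}"),
        Err(QuerySingleError::NoEntities(_)) => println!("no player spawned yet"),
        Err(QuerySingleError::MultipleEntities(_)) => println!("more than one player"),
    }
}

fn main() {
    bevy_ecs::system::assert_is_system(report_player);
}
```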
@@ -111,8 +62,7 @@ pub enum QuerySingleError { #[cfg(test)] mod test { - use crate::prelude::World; - use alloc::format; + use crate::{prelude::World, query::QueryEntityError}; use bevy_ecs_macros::Component; #[test] @@ -123,19 +73,18 @@ mod test { struct Present1; #[derive(Component)] struct Present2; - #[derive(Component, Debug)] + #[derive(Component, Debug, PartialEq)] struct NotPresent; - let entity = world.spawn((Present1, Present2)).id(); + let entity = world.spawn((Present1, Present2)); - let err = world - .query::<&NotPresent>() - .get(&world, entity) - .unwrap_err(); + let (entity, archetype_id) = (entity.id(), entity.archetype().id()); + + let result = world.query::<&NotPresent>().get(&world, entity); assert_eq!( - format!("{err:?}"), - "QueryDoesNotMatch(0v1 with components Present1, Present2)" + result, + Err(QueryEntityError::QueryDoesNotMatch(entity, archetype_id)) ); } } diff --git a/crates/bevy_ecs/src/query/fetch.rs b/crates/bevy_ecs/src/query/fetch.rs index bf9b41e1d4..cd632f7b14 100644 --- a/crates/bevy_ecs/src/query/fetch.rs +++ b/crates/bevy_ecs/src/query/fetch.rs @@ -1,7 +1,7 @@ use crate::{ archetype::{Archetype, Archetypes}, bundle::Bundle, - change_detection::{MaybeThinSlicePtrLocation, Ticks, TicksMut}, + change_detection::{MaybeLocation, Ticks, TicksMut}, component::{Component, ComponentId, Components, Mutable, StorageType, Tick}, entity::{Entities, Entity, EntityLocation}, query::{Access, DebugCheckedUnwrap, FilteredAccess, WorldQuery}, @@ -12,7 +12,7 @@ use crate::{ }, }; use bevy_ptr::{ThinSlicePtr, UnsafeCellDeref}; -use core::{cell::UnsafeCell, marker::PhantomData}; +use core::{cell::UnsafeCell, marker::PhantomData, panic::Location}; use smallvec::SmallVec; use variadics_please::all_tuples; @@ -265,8 +265,9 @@ use variadics_please::all_tuples; /// /// # Safety /// -/// Component access of `Self::ReadOnly` must be a subset of `Self` -/// and `Self::ReadOnly` must match exactly the same archetypes/tables as `Self` +/// - Component access of `Self::ReadOnly` must be a subset of `Self` +/// and `Self::ReadOnly` must match exactly the same archetypes/tables as `Self` +/// - `IS_READ_ONLY` must be `true` if and only if `Self: ReadOnlyQueryData` /// /// [`Query`]: crate::system::Query /// [`ReadOnly`]: Self::ReadOnly @@ -276,6 +277,9 @@ use variadics_please::all_tuples; note = "if `{Self}` is a component type, try using `&{Self}` or `&mut {Self}`" )] pub unsafe trait QueryData: WorldQuery { + /// True if this query is read-only and may not perform mutable access. + const IS_READ_ONLY: bool; + /// The read-only variant of this [`QueryData`], which satisfies the [`ReadOnlyQueryData`] trait. 
type ReadOnly: ReadOnlyQueryData::State>; @@ -367,6 +371,7 @@ unsafe impl WorldQuery for Entity { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for Entity { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = Entity; @@ -443,6 +448,7 @@ unsafe impl WorldQuery for EntityLocation { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for EntityLocation { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = EntityLocation; @@ -524,6 +530,7 @@ unsafe impl<'a> WorldQuery for EntityRef<'a> { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl<'a> QueryData for EntityRef<'a> { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = EntityRef<'w>; @@ -604,6 +611,7 @@ unsafe impl<'a> WorldQuery for EntityMut<'a> { /// SAFETY: access of `EntityRef` is a subset of `EntityMut` unsafe impl<'a> QueryData for EntityMut<'a> { + const IS_READ_ONLY: bool = false; type ReadOnly = EntityRef<'a>; type Item<'w> = EntityMut<'w>; @@ -696,6 +704,7 @@ unsafe impl<'a> WorldQuery for FilteredEntityRef<'a> { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl<'a> QueryData for FilteredEntityRef<'a> { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = FilteredEntityRef<'w>; @@ -790,6 +799,7 @@ unsafe impl<'a> WorldQuery for FilteredEntityMut<'a> { /// SAFETY: access of `FilteredEntityRef` is a subset of `FilteredEntityMut` unsafe impl<'a> QueryData for FilteredEntityMut<'a> { + const IS_READ_ONLY: bool = false; type ReadOnly = FilteredEntityRef<'a>; type Item<'w> = FilteredEntityMut<'w>; @@ -888,6 +898,7 @@ unsafe impl<'a, B> QueryData for EntityRefExcept<'a, B> where B: Bundle, { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = EntityRefExcept<'w, B>; @@ -988,6 +999,7 @@ unsafe impl<'a, B> QueryData for EntityMutExcept<'a, B> where B: Bundle, { + const IS_READ_ONLY: bool = false; type ReadOnly = EntityRefExcept<'a, B>; type Item<'w> = EntityMutExcept<'w, B>; @@ -1060,6 +1072,7 @@ unsafe impl WorldQuery for &Archetype { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for &Archetype { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = &'w Archetype; @@ -1204,6 +1217,7 @@ unsafe impl WorldQuery for &T { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for &T { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = &'w T; @@ -1251,7 +1265,7 @@ pub struct RefFetch<'w, T: Component> { ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, - MaybeThinSlicePtrLocation<'w>, + MaybeLocation>>>, )>, // T::STORAGE_TYPE = StorageType::SparseSet // Can be `None` when the component has never been inserted @@ -1337,10 +1351,9 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { column.get_data_slice(table.entity_count()).into(), column.get_added_ticks_slice(table.entity_count()).into(), column.get_changed_ticks_slice(table.entity_count()).into(), - #[cfg(feature = "track_location")] - column.get_changed_by_slice(table.entity_count()).into(), - #[cfg(not(feature = "track_location"))] - (), + column + .get_changed_by_slice(table.entity_count()) + .map(Into::into), )); // SAFETY: set_table is only called when T::STORAGE_TYPE = StorageType::Table unsafe { fetch.components.set_table(table_data) }; @@ -1376,6 +1389,7 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { /// SAFETY: `Self` is the same as `Self::ReadOnly` 
unsafe impl<'__w, T: Component> QueryData for Ref<'__w, T> { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = Ref<'w, T>; @@ -1392,7 +1406,7 @@ unsafe impl<'__w, T: Component> QueryData for Ref<'__w, T> { fetch.components.extract( |table| { // SAFETY: set_table was previously called - let (table_components, added_ticks, changed_ticks, _callers) = + let (table_components, added_ticks, changed_ticks, callers) = unsafe { table.debug_checked_unwrap() }; // SAFETY: The caller ensures `table_row` is in range. @@ -1402,8 +1416,7 @@ unsafe impl<'__w, T: Component> QueryData for Ref<'__w, T> { // SAFETY: The caller ensures `table_row` is in range. let changed = unsafe { changed_ticks.get(table_row.as_usize()) }; // SAFETY: The caller ensures `table_row` is in range. - #[cfg(feature = "track_location")] - let caller = unsafe { _callers.get(table_row.as_usize()) }; + let caller = callers.map(|callers| unsafe { callers.get(table_row.as_usize()) }); Ref { value: component.deref(), @@ -1413,13 +1426,12 @@ unsafe impl<'__w, T: Component> QueryData for Ref<'__w, T> { this_run: fetch.this_run, last_run: fetch.last_run, }, - #[cfg(feature = "track_location")] - changed_by: caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } }, |sparse_set| { // SAFETY: The caller ensures `entity` is in range and has the component. - let (component, ticks, _caller) = unsafe { + let (component, ticks, caller) = unsafe { sparse_set .debug_checked_unwrap() .get_with_ticks(entity) @@ -1429,8 +1441,7 @@ unsafe impl<'__w, T: Component> QueryData for Ref<'__w, T> { Ref { value: component.deref(), ticks: Ticks::from_tick_cells(ticks, fetch.last_run, fetch.this_run), - #[cfg(feature = "track_location")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } }, ) @@ -1449,7 +1460,7 @@ pub struct WriteFetch<'w, T: Component> { ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, - MaybeThinSlicePtrLocation<'w>, + MaybeLocation>>>, )>, // T::STORAGE_TYPE = StorageType::SparseSet // Can be `None` when the component has never been inserted @@ -1535,10 +1546,9 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { column.get_data_slice(table.entity_count()).into(), column.get_added_ticks_slice(table.entity_count()).into(), column.get_changed_ticks_slice(table.entity_count()).into(), - #[cfg(feature = "track_location")] - column.get_changed_by_slice(table.entity_count()).into(), - #[cfg(not(feature = "track_location"))] - (), + column + .get_changed_by_slice(table.entity_count()) + .map(Into::into), )); // SAFETY: set_table is only called when T::STORAGE_TYPE = StorageType::Table unsafe { fetch.components.set_table(table_data) }; @@ -1574,6 +1584,7 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { /// SAFETY: access of `&T` is a subset of `&mut T` unsafe impl<'__w, T: Component> QueryData for &'__w mut T { + const IS_READ_ONLY: bool = false; type ReadOnly = &'__w T; type Item<'w> = Mut<'w, T>; @@ -1590,7 +1601,7 @@ unsafe impl<'__w, T: Component> QueryData for &'__w mut T fetch.components.extract( |table| { // SAFETY: set_table was previously called - let (table_components, added_ticks, changed_ticks, _callers) = + let (table_components, added_ticks, changed_ticks, callers) = unsafe { table.debug_checked_unwrap() }; // SAFETY: The caller ensures `table_row` is in range. @@ -1600,8 +1611,7 @@ unsafe impl<'__w, T: Component> QueryData for &'__w mut T // SAFETY: The caller ensures `table_row` is in range. 
let changed = unsafe { changed_ticks.get(table_row.as_usize()) }; // SAFETY: The caller ensures `table_row` is in range. - #[cfg(feature = "track_location")] - let caller = unsafe { _callers.get(table_row.as_usize()) }; + let caller = callers.map(|callers| unsafe { callers.get(table_row.as_usize()) }); Mut { value: component.deref_mut(), @@ -1611,13 +1621,12 @@ unsafe impl<'__w, T: Component> QueryData for &'__w mut T this_run: fetch.this_run, last_run: fetch.last_run, }, - #[cfg(feature = "track_location")] - changed_by: caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), } }, |sparse_set| { // SAFETY: The caller ensures `entity` is in range and has the component. - let (component, ticks, _caller) = unsafe { + let (component, ticks, caller) = unsafe { sparse_set .debug_checked_unwrap() .get_with_ticks(entity) @@ -1627,8 +1636,7 @@ unsafe impl<'__w, T: Component> QueryData for &'__w mut T Mut { value: component.assert_unique().deref_mut(), ticks: TicksMut::from_tick_cells(ticks, fetch.last_run, fetch.this_run), - #[cfg(feature = "track_location")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), } }, ) @@ -1719,6 +1727,7 @@ unsafe impl<'__w, T: Component> WorldQuery for Mut<'__w, T> { // SAFETY: access of `Ref` is a subset of `Mut` unsafe impl<'__w, T: Component> QueryData for Mut<'__w, T> { + const IS_READ_ONLY: bool = false; type ReadOnly = Ref<'__w, T>; type Item<'w> = Mut<'w, T>; @@ -1846,6 +1855,7 @@ unsafe impl WorldQuery for Option { // SAFETY: defers to soundness of `T: WorldQuery` impl unsafe impl QueryData for Option { + const IS_READ_ONLY: bool = T::IS_READ_ONLY; type ReadOnly = Option; type Item<'w> = Option>; @@ -2009,6 +2019,7 @@ unsafe impl WorldQuery for Has { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for Has { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = bool; @@ -2057,6 +2068,7 @@ macro_rules! impl_tuple_query_data { $(#[$meta])* // SAFETY: defers to soundness `$name: WorldQuery` impl unsafe impl<$($name: QueryData),*> QueryData for ($($name,)*) { + const IS_READ_ONLY: bool = true $(&& $name::IS_READ_ONLY)*; type ReadOnly = ($($name::ReadOnly,)*); type Item<'w> = ($($name::Item<'w>,)*); @@ -2219,6 +2231,7 @@ macro_rules! 
impl_anytuple_fetch { $(#[$meta])* // SAFETY: defers to soundness of `$name: WorldQuery` impl unsafe impl<$($name: QueryData),*> QueryData for AnyOf<($($name,)*)> { + const IS_READ_ONLY: bool = true $(&& $name::IS_READ_ONLY)*; type ReadOnly = AnyOf<($($name::ReadOnly,)*)>; type Item<'w> = ($(Option<$name::Item<'w>>,)*); @@ -2323,6 +2336,7 @@ unsafe impl WorldQuery for NopWorldQuery { /// SAFETY: `Self::ReadOnly` is `Self` unsafe impl QueryData for NopWorldQuery { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = (); @@ -2392,6 +2406,7 @@ unsafe impl WorldQuery for PhantomData { /// SAFETY: `Self::ReadOnly` is `Self` unsafe impl QueryData for PhantomData { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'a> = (); diff --git a/crates/bevy_ecs/src/query/filter.rs b/crates/bevy_ecs/src/query/filter.rs index 9e4f36a965..e4e1f0fd66 100644 --- a/crates/bevy_ecs/src/query/filter.rs +++ b/crates/bevy_ecs/src/query/filter.rs @@ -66,9 +66,7 @@ use variadics_please::all_tuples; /// # bevy_ecs::system::assert_is_system(my_system); /// ``` /// -/// [`matches_component_set`]: Self::matches_component_set /// [`Query`]: crate::system::Query -/// [`State`]: Self::State /// /// # Safety /// diff --git a/crates/bevy_ecs/src/query/iter.rs b/crates/bevy_ecs/src/query/iter.rs index f68b9d45d1..fc89843493 100644 --- a/crates/bevy_ecs/src/query/iter.rs +++ b/crates/bevy_ecs/src/query/iter.rs @@ -3,7 +3,7 @@ use crate::{ archetype::{Archetype, ArchetypeEntity, Archetypes}, bundle::Bundle, component::Tick, - entity::{Entities, Entity, EntityBorrow, EntitySet, EntitySetIterator}, + entity::{ContainsEntity, Entities, Entity, EntityEquivalent, EntitySet, EntitySetIterator}, query::{ArchetypeFilter, DebugCheckedUnwrap, QueryState, StorageId}, storage::{Table, TableRow, Tables}, world::{ @@ -487,7 +487,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// # schedule.add_systems((system_1, system_2, system_3)); /// # schedule.run(&mut world); /// ``` - pub fn sort: Ord> + 'w>( + pub fn sort( self, ) -> QuerySortedIter< 'w, @@ -495,7 +495,10 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { D, F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, - > { + > + where + for<'lw> L::Item<'lw>: Ord, + { self.sort_impl::(|keyed_query| keyed_query.sort()) } @@ -541,7 +544,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// # schedule.add_systems((system_1)); /// # schedule.run(&mut world); /// ``` - pub fn sort_unstable: Ord> + 'w>( + pub fn sort_unstable( self, ) -> QuerySortedIter< 'w, @@ -549,7 +552,10 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { D, F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, - > { + > + where + for<'lw> L::Item<'lw>: Ord, + { self.sort_impl::(|keyed_query| keyed_query.sort_unstable()) } @@ -604,7 +610,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// ``` pub fn sort_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedIter< 'w, 's, @@ -636,7 +642,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. 
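A quick aside on the new associated const: each `QueryData` impl in the hunks above now declares `IS_READ_ONLY`, and the tuple and `AnyOf` macros fold the flags together with `&&`. Below is a minimal standalone sketch of that folding pattern; the `Data` trait and the `Read`/`Write` types are illustrative stand-ins, not Bevy's real `QueryData` items.

```
// Illustrative sketch: a per-impl read-only flag combined across a tuple,
// mirroring `const IS_READ_ONLY: bool = true $(&& $name::IS_READ_ONLY)*;`.
trait Data {
    const IS_READ_ONLY: bool;
}

struct Read; // stands in for `&T`, `Entity`, `Has<T>`, ...
struct Write; // stands in for `&mut T`, `EntityMut`, ...

impl Data for Read {
    const IS_READ_ONLY: bool = true;
}

impl Data for Write {
    const IS_READ_ONLY: bool = false;
}

// A tuple is read-only only if every element is.
impl<A: Data, B: Data> Data for (A, B) {
    const IS_READ_ONLY: bool = A::IS_READ_ONLY && B::IS_READ_ONLY;
}

fn main() {
    let _ = (Read, Write);
    assert!(<(Read, Read)>::IS_READ_ONLY);
    assert!(!<(Read, Write)>::IS_READ_ONLY);
}
```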
pub fn sort_unstable_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedIter< 'w, 's, @@ -688,7 +694,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// #[derive(Component)] /// struct AvailableMarker; /// - /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord)] + /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] /// enum Rarity { /// Common, /// Rare, @@ -716,7 +722,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// .sort_by_key::(|entity_ref| { /// ( /// entity_ref.contains::(), - /// entity_ref.get::() + /// entity_ref.get::().copied() /// ) /// }) /// .rev() @@ -728,7 +734,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// ``` pub fn sort_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedIter< 'w, 's, @@ -761,7 +767,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. pub fn sort_unstable_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedIter< 'w, 's, @@ -796,7 +802,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. pub fn sort_by_cached_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedIter< 'w, 's, @@ -826,7 +832,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. fn sort_impl( self, - f: impl FnOnce(&mut Vec<(L::Item<'w>, NeutralOrd)>), + f: impl FnOnce(&mut Vec<(L::Item<'_>, NeutralOrd)>), ) -> QuerySortedIter< 'w, 's, @@ -849,6 +855,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { // SAFETY: // `self.world` has permission to access the required components. // The original query iter has not been iterated on, so no items are aliased from it. + // `QueryIter::new` ensures `world` is the same one used to initialize `query_state`. let query_lens = unsafe { query_lens_state.query_unchecked_manual(world) }.into_iter(); let mut keyed_query: Vec<_> = query_lens .map(|(key, entity)| (key, NeutralOrd(entity))) @@ -1110,7 +1117,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> Debug /// Entities that don't match the query are skipped. /// /// This struct is created by the [`Query::iter_many`](crate::system::Query::iter_many) and [`Query::iter_many_mut`](crate::system::Query::iter_many_mut) methods. -pub struct QueryManyIter<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> { +pub struct QueryManyIter<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> +{ world: UnsafeWorldCell<'w>, entity_iter: I, entities: &'w Entities, @@ -1121,7 +1129,7 @@ pub struct QueryManyIter<'w, 's, D: QueryData, F: QueryFilter, I: Iterator, } -impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> +impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> QueryManyIter<'w, 's, D, F, I> { /// # Safety @@ -1160,7 +1168,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// It is always safe for shared access. 
#[inline(always)] unsafe fn fetch_next_aliased_unchecked( - entity_iter: impl Iterator, + entity_iter: impl Iterator, entities: &'w Entities, tables: &'w Tables, archetypes: &'w Archetypes, @@ -1333,7 +1341,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > where - L::Item<'w>: Ord, + for<'lw> L::Item<'lw>: Ord, { self.sort_impl::(|keyed_query| keyed_query.sort()) } @@ -1391,7 +1399,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > where - L::Item<'w>: Ord, + for<'lw> L::Item<'lw>: Ord, { self.sort_impl::(|keyed_query| keyed_query.sort_unstable()) } @@ -1448,7 +1456,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// ``` pub fn sort_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedManyIter< 'w, 's, @@ -1479,7 +1487,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// called on [`QueryManyIter`] before. pub fn sort_unstable_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedManyIter< 'w, 's, @@ -1531,7 +1539,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// #[derive(Component)] /// struct AvailableMarker; /// - /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord)] + /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] /// enum Rarity { /// Common, /// Rare, @@ -1561,7 +1569,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// .sort_by_key::(|entity_ref| { /// ( /// entity_ref.contains::(), - /// entity_ref.get::() + // entity_ref.get::().copied() /// ) /// }) /// .rev() @@ -1573,7 +1581,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// ``` pub fn sort_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedManyIter< 'w, 's, @@ -1605,7 +1613,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// called on [`QueryManyIter`] before. pub fn sort_unstable_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedManyIter< 'w, 's, @@ -1639,7 +1647,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// called on [`QueryManyIter`] before. pub fn sort_by_cached_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedManyIter< 'w, 's, @@ -1668,7 +1676,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// called on [`QueryManyIter`] before. fn sort_impl( self, - f: impl FnOnce(&mut Vec<(L::Item<'w>, NeutralOrd)>), + f: impl FnOnce(&mut Vec<(L::Item<'_>, NeutralOrd)>), ) -> QuerySortedManyIter< 'w, 's, @@ -1683,6 +1691,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> // SAFETY: // `self.world` has permission to access the required components. // The original query iter has not been iterated on, so no items are aliased from it. + // `QueryIter::new` ensures `world` is the same one used to initialize `query_state`. 
let query_lens = unsafe { query_lens_state.query_unchecked_manual(world) } .iter_many_inner(self.entity_iter); let mut keyed_query: Vec<_> = query_lens @@ -1712,7 +1721,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> } } -impl<'w, 's, D: QueryData, F: QueryFilter, I: DoubleEndedIterator> +impl<'w, 's, D: QueryData, F: QueryFilter, I: DoubleEndedIterator> QueryManyIter<'w, 's, D, F, I> { /// Get next result from the back of the query @@ -1738,7 +1747,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: DoubleEndedIterator> Iterator +impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: Iterator> Iterator for QueryManyIter<'w, 's, D, F, I> { type Item = D::Item<'w>; @@ -1767,8 +1776,13 @@ impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: Iterator> - DoubleEndedIterator for QueryManyIter<'w, 's, D, F, I> +impl< + 'w, + 's, + D: ReadOnlyQueryData, + F: QueryFilter, + I: DoubleEndedIterator, + > DoubleEndedIterator for QueryManyIter<'w, 's, D, F, I> { #[inline(always)] fn next_back(&mut self) -> Option { @@ -1790,8 +1804,8 @@ impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: DoubleEndedIterator> FusedIterator - for QueryManyIter<'w, 's, D, F, I> +impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: Iterator> + FusedIterator for QueryManyIter<'w, 's, D, F, I> { } @@ -1801,7 +1815,7 @@ unsafe impl<'w, 's, F: QueryFilter, I: EntitySetIterator> EntitySetIterator { } -impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> Debug +impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> Debug for QueryManyIter<'w, 's, D, F, I> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { @@ -2433,7 +2447,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIterationCursor<'w, 's, D, F> { } // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QuerySortedIter, QueryManyIter, QuerySortedManyIter, QueryCombinationIter, QueryState::par_fold_init_unchecked_manual + // QueryIter, QueryIterationCursor, QuerySortedIter, QueryManyIter, QuerySortedManyIter, QueryCombinationIter, + // QueryState::par_fold_init_unchecked_manual, QueryState::par_many_fold_init_unchecked_manual, + // QueryState::par_many_unique_fold_init_unchecked_manual /// # Safety /// `tables` and `archetypes` must belong to the same world that the [`QueryIterationCursor`] /// was initialized for. 
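Note on the sort adapters above: the `Ord` requirement moved from `L::Item<'w>: Ord` to the higher-ranked `for<'lw> L::Item<'lw>: Ord`, so the key type must be orderable for every lifetime rather than only the iterator's `'w`. A standalone sketch of that bound shape follows; the `Lens` trait and `ById` type are hypothetical, not Bevy APIs.

```
// Hypothetical lens trait with a lifetime-generic item, used only to show the
// shape of a `for<'x> L::Item<'x>: Ord` bound like the one added above.
trait Lens {
    type Item<'a>;
}

fn sort_keys<'a, L: Lens>(mut keys: Vec<L::Item<'a>>) -> Vec<L::Item<'a>>
where
    // Must hold for every lifetime, not just the caller's `'a`.
    for<'x> L::Item<'x>: Ord,
{
    keys.sort();
    keys
}

struct ById;

impl Lens for ById {
    type Item<'a> = u32;
}

fn main() {
    assert_eq!(sort_keys::<ById>(vec![3, 1, 2]), vec![1, 2, 3]);
}
```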
@@ -2575,6 +2591,7 @@ impl Ord for NeutralOrd { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use alloc::vec::Vec; use std::println; @@ -2592,10 +2609,16 @@ mod tests { #[test] fn query_iter_sorts() { let mut world = World::new(); + for i in 0..100 { + world.spawn(A(i as f32)); + world.spawn((A(i as f32), Sparse(i))); + world.spawn(Sparse(i)); + } let mut query = world.query::(); let sort = query.iter(&world).sort::().collect::>(); + assert_eq!(sort.len(), 300); let sort_unstable = query .iter(&world) @@ -2898,13 +2921,13 @@ mod tests { { let mut query = query_state .iter_many_mut(&mut world, [id, id]) - .sort_by::<&C>(Ord::cmp); + .sort_by::<&C>(|l, r| Ord::cmp(l, r)); while query.fetch_next().is_some() {} } { let mut query = query_state .iter_many_mut(&mut world, [id, id]) - .sort_unstable_by::<&C>(Ord::cmp); + .sort_unstable_by::<&C>(|l, r| Ord::cmp(l, r)); while query.fetch_next().is_some() {} } { diff --git a/crates/bevy_ecs/src/query/mod.rs b/crates/bevy_ecs/src/query/mod.rs index 9d40dd7d01..c1744cbf24 100644 --- a/crates/bevy_ecs/src/query/mod.rs +++ b/crates/bevy_ecs/src/query/mod.rs @@ -102,6 +102,7 @@ impl DebugCheckedUnwrap for Option { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use crate::{ archetype::Archetype, @@ -111,7 +112,7 @@ mod tests { ArchetypeFilter, FilteredAccess, Has, QueryCombinationIter, QueryData, ReadOnlyQueryData, WorldQuery, }, - schedule::{IntoSystemConfigs, Schedule}, + schedule::{IntoScheduleConfigs, Schedule}, storage::{Table, TableRow}, system::{assert_is_system, IntoSystem, Query, System, SystemState}, world::{unsafe_world_cell::UnsafeWorldCell, World}, @@ -437,6 +438,18 @@ mod tests { ); } + #[test] + fn get_many_only_mut_checks_duplicates() { + let mut world = World::new(); + let id = world.spawn(A(10)).id(); + let mut query_state = world.query::<&mut A>(); + let mut query = query_state.query_mut(&mut world); + let result = query.get_many([id, id]); + assert_eq!(result, Ok([&A(10), &A(10)])); + let mut_result = query.get_many_mut([id, id]); + assert!(mut_result.is_err()); + } + #[test] fn multi_storage_query() { let mut world = World::new(); @@ -751,8 +764,8 @@ mod tests { let _: Option<&Foo> = q.get(&world, e).ok(); let _: Option<&Foo> = q.get_manual(&world, e).ok(); let _: Option<[&Foo; 1]> = q.get_many(&world, [e]).ok(); - let _: Option<&Foo> = q.get_single(&world).ok(); - let _: &Foo = q.single(&world); + let _: Option<&Foo> = q.single(&world).ok(); + let _: &Foo = q.single(&world).unwrap(); // system param let mut q = SystemState::>::new(&mut world); @@ -764,9 +777,8 @@ mod tests { let _: Option<&Foo> = q.get(e).ok(); let _: Option<[&Foo; 1]> = q.get_many([e]).ok(); - let _: Option<&Foo> = q.get_single().ok(); - let _: [&Foo; 1] = q.many([e]); - let _: &Foo = q.single(); + let _: Option<&Foo> = q.single().ok(); + let _: &Foo = q.single().unwrap(); } // regression test for https://github.com/bevyengine/bevy/pull/8029 @@ -852,7 +864,7 @@ mod tests { } fn init_state(world: &mut World) -> Self::State { - world.components.register_resource::() + world.components_registrator().register_resource::() } fn get_state(components: &Components) -> Option { @@ -869,6 +881,7 @@ mod tests { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for ReadsRData { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; type Item<'w> = (); diff --git a/crates/bevy_ecs/src/query/par_iter.rs b/crates/bevy_ecs/src/query/par_iter.rs index b3ea93fbbc..6683238aa3 
100644 --- a/crates/bevy_ecs/src/query/par_iter.rs +++ b/crates/bevy_ecs/src/query/par_iter.rs @@ -1,8 +1,13 @@ use crate::{ - batching::BatchingStrategy, component::Tick, world::unsafe_world_cell::UnsafeWorldCell, + batching::BatchingStrategy, + component::Tick, + entity::{EntityEquivalent, UniqueEntityEquivalentVec}, + world::unsafe_world_cell::UnsafeWorldCell, }; -use super::{QueryData, QueryFilter, QueryItem, QueryState}; +use super::{QueryData, QueryFilter, QueryItem, QueryState, ReadOnlyQueryData}; + +use alloc::vec::Vec; /// A parallel iterator over query results of a [`Query`](crate::system::Query). /// @@ -54,7 +59,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { /// fn system(query: Query<&T>){ /// let mut queue: Parallel = Parallel::default(); /// // queue.borrow_local_mut() will get or create a thread_local queue for each task/thread; - /// query.par_iter().for_each_init(|| queue.borrow_local_mut(),|local_queue,item| { + /// query.par_iter().for_each_init(|| queue.borrow_local_mut(),|local_queue, item| { /// **local_queue += 1; /// }); /// @@ -89,7 +94,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { // at the same time. unsafe { self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .into_iter() .fold(init, func); } } @@ -101,7 +107,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { // SAFETY: See the safety comment above. unsafe { self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .into_iter() .fold(init, func); } } else { @@ -146,3 +153,311 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { .calc_batch_size(max_items, thread_count) } } + +/// A parallel iterator over the unique query items generated from an [`Entity`] list. +/// +/// This struct is created by the [`Query::par_iter_many`] method. +/// +/// [`Entity`]: crate::entity::Entity +/// [`Query::par_iter_many`]: crate::system::Query::par_iter_many +pub struct QueryParManyIter<'w, 's, D: QueryData, F: QueryFilter, E: EntityEquivalent> { + pub(crate) world: UnsafeWorldCell<'w>, + pub(crate) state: &'s QueryState, + pub(crate) entity_list: Vec, + pub(crate) last_run: Tick, + pub(crate) this_run: Tick, + pub(crate) batching_strategy: BatchingStrategy, +} + +impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, E: EntityEquivalent + Sync> + QueryParManyIter<'w, 's, D, F, E> +{ + /// Changes the batching strategy used when iterating. + /// + /// For more information on how this affects the resultant iteration, see + /// [`BatchingStrategy`]. + pub fn batching_strategy(mut self, strategy: BatchingStrategy) -> Self { + self.batching_strategy = strategy; + self + } + + /// Runs `func` on each query result in parallel. + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each) + Send + Sync + Clone>(self, func: FN) { + self.for_each_init(|| {}, |_, item| func(item)); + } + + /// Runs `func` on each query result in parallel on a value returned by `init`. + /// + /// `init` may be called multiple times per thread, and the values returned may be discarded between tasks on any given thread. 
+ /// Callers should avoid using this function as if it were a parallel version + /// of [`Iterator::fold`]. + /// + /// # Example + /// + /// ``` + /// use bevy_utils::Parallel; + /// use crate::{bevy_ecs::prelude::{Component, Res, Resource, Entity}, bevy_ecs::system::Query}; + /// # use core::slice; + /// use bevy_platform::prelude::Vec; + /// # fn some_expensive_operation(_item: &T) -> usize { + /// # 0 + /// # } + /// + /// #[derive(Component)] + /// struct T; + /// + /// #[derive(Resource)] + /// struct V(Vec); + /// + /// impl<'a> IntoIterator for &'a V { + /// // ... + /// # type Item = &'a Entity; + /// # type IntoIter = slice::Iter<'a, Entity>; + /// # + /// # fn into_iter(self) -> Self::IntoIter { + /// # self.0.iter() + /// # } + /// } + /// + /// fn system(query: Query<&T>, entities: Res){ + /// let mut queue: Parallel = Parallel::default(); + /// // queue.borrow_local_mut() will get or create a thread_local queue for each task/thread; + /// query.par_iter_many(&entities).for_each_init(|| queue.borrow_local_mut(),|local_queue, item| { + /// **local_queue += some_expensive_operation(item); + /// }); + /// + /// // collect value from every thread + /// let final_value: usize = queue.iter_mut().map(|v| *v).sum(); + /// } + /// ``` + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each_init(self, init: INIT, func: FN) + where + FN: Fn(&mut T, QueryItem<'w, D>) + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + { + let func = |mut init, item| { + func(&mut init, item); + init + }; + #[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] + { + let init = init(); + // SAFETY: + // This method can only be called once per instance of QueryParManyIter, + // which ensures that mutable queries cannot be executed multiple times at once. + // Mutable instances of QueryParManyUniqueIter can only be created via an exclusive borrow of a + // Query or a World, which ensures that multiple aliasing QueryParManyIters cannot exist + // at the same time. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_inner(&self.entity_list) + .fold(init, func); + } + } + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + { + let thread_count = bevy_tasks::ComputeTaskPool::get().thread_num(); + if thread_count <= 1 { + let init = init(); + // SAFETY: See the safety comment above. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_inner(&self.entity_list) + .fold(init, func); + } + } else { + // Need a batch size of at least 1. + let batch_size = self.get_batch_size(thread_count).max(1); + // SAFETY: See the safety comment above. + unsafe { + self.state.par_many_fold_init_unchecked_manual( + init, + self.world, + &self.entity_list, + batch_size, + func, + self.last_run, + self.this_run, + ); + } + } + } + } + + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + fn get_batch_size(&self, thread_count: usize) -> usize { + self.batching_strategy + .calc_batch_size(|| self.entity_list.len(), thread_count) + } +} + +/// A parallel iterator over the unique query items generated from an [`EntitySet`]. 
+/// +/// This struct is created by the [`Query::par_iter_many_unique`] and [`Query::par_iter_many_unique_mut`] methods. +/// +/// [`EntitySet`]: crate::entity::EntitySet +/// [`Query::par_iter_many_unique`]: crate::system::Query::par_iter_many_unique +/// [`Query::par_iter_many_unique_mut`]: crate::system::Query::par_iter_many_unique_mut +pub struct QueryParManyUniqueIter<'w, 's, D: QueryData, F: QueryFilter, E: EntityEquivalent + Sync> +{ + pub(crate) world: UnsafeWorldCell<'w>, + pub(crate) state: &'s QueryState, + pub(crate) entity_list: UniqueEntityEquivalentVec, + pub(crate) last_run: Tick, + pub(crate) this_run: Tick, + pub(crate) batching_strategy: BatchingStrategy, +} + +impl<'w, 's, D: QueryData, F: QueryFilter, E: EntityEquivalent + Sync> + QueryParManyUniqueIter<'w, 's, D, F, E> +{ + /// Changes the batching strategy used when iterating. + /// + /// For more information on how this affects the resultant iteration, see + /// [`BatchingStrategy`]. + pub fn batching_strategy(mut self, strategy: BatchingStrategy) -> Self { + self.batching_strategy = strategy; + self + } + + /// Runs `func` on each query result in parallel. + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each) + Send + Sync + Clone>(self, func: FN) { + self.for_each_init(|| {}, |_, item| func(item)); + } + + /// Runs `func` on each query result in parallel on a value returned by `init`. + /// + /// `init` may be called multiple times per thread, and the values returned may be discarded between tasks on any given thread. + /// Callers should avoid using this function as if it were a parallel version + /// of [`Iterator::fold`]. + /// + /// # Example + /// + /// ``` + /// use bevy_utils::Parallel; + /// use crate::{bevy_ecs::{prelude::{Component, Res, Resource, Entity}, entity::UniqueEntityVec, system::Query}}; + /// # use core::slice; + /// # use crate::bevy_ecs::entity::UniqueEntityIter; + /// # fn some_expensive_operation(_item: &T) -> usize { + /// # 0 + /// # } + /// + /// #[derive(Component)] + /// struct T; + /// + /// #[derive(Resource)] + /// struct V(UniqueEntityVec); + /// + /// impl<'a> IntoIterator for &'a V { + /// // ... + /// # type Item = &'a Entity; + /// # type IntoIter = UniqueEntityIter>; + /// # + /// # fn into_iter(self) -> Self::IntoIter { + /// # self.0.iter() + /// # } + /// } + /// + /// fn system(query: Query<&T>, entities: Res){ + /// let mut queue: Parallel = Parallel::default(); + /// // queue.borrow_local_mut() will get or create a thread_local queue for each task/thread; + /// query.par_iter_many_unique(&entities).for_each_init(|| queue.borrow_local_mut(),|local_queue, item| { + /// **local_queue += some_expensive_operation(item); + /// }); + /// + /// // collect value from every thread + /// let final_value: usize = queue.iter_mut().map(|v| *v).sum(); + /// } + /// ``` + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. 
+ /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each_init(self, init: INIT, func: FN) + where + FN: Fn(&mut T, QueryItem<'w, D>) + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + { + let func = |mut init, item| { + func(&mut init, item); + init + }; + #[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] + { + let init = init(); + // SAFETY: + // This method can only be called once per instance of QueryParManyUniqueIter, + // which ensures that mutable queries cannot be executed multiple times at once. + // Mutable instances of QueryParManyUniqueIter can only be created via an exclusive borrow of a + // Query or a World, which ensures that multiple aliasing QueryParManyUniqueIters cannot exist + // at the same time. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_unique_inner(self.entity_list) + .fold(init, func); + } + } + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + { + let thread_count = bevy_tasks::ComputeTaskPool::get().thread_num(); + if thread_count <= 1 { + let init = init(); + // SAFETY: See the safety comment above. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_unique_inner(self.entity_list) + .fold(init, func); + } + } else { + // Need a batch size of at least 1. + let batch_size = self.get_batch_size(thread_count).max(1); + // SAFETY: See the safety comment above. + unsafe { + self.state.par_many_unique_fold_init_unchecked_manual( + init, + self.world, + &self.entity_list, + batch_size, + func, + self.last_run, + self.this_run, + ); + } + } + } + } + + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + fn get_batch_size(&self, thread_count: usize) -> usize { + self.batching_strategy + .calc_batch_size(|| self.entity_list.len(), thread_count) + } +} diff --git a/crates/bevy_ecs/src/query/state.rs b/crates/bevy_ecs/src/query/state.rs index 0019fe3783..e9a00f4646 100644 --- a/crates/bevy_ecs/src/query/state.rs +++ b/crates/bevy_ecs/src/query/state.rs @@ -1,29 +1,28 @@ use crate::{ archetype::{Archetype, ArchetypeComponentId, ArchetypeGeneration, ArchetypeId}, - batching::BatchingStrategy, component::{ComponentId, Tick}, - entity::{Entity, EntityBorrow, EntitySet}, + entity::{Entity, EntityEquivalent, EntitySet, UniqueEntityArray}, entity_disabling::DefaultQueryFilters, prelude::FromWorld, - query::{ - Access, DebugCheckedUnwrap, FilteredAccess, QueryCombinationIter, QueryIter, QueryParIter, - WorldQuery, - }, + query::{Access, FilteredAccess, QueryCombinationIter, QueryIter, QueryParIter, WorldQuery}, storage::{SparseSetIndex, TableId}, system::Query, world::{unsafe_world_cell::UnsafeWorldCell, World, WorldId}, }; +#[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] +use crate::entity::UniqueEntityEquivalentSlice; + use alloc::vec::Vec; -use core::{fmt, mem::MaybeUninit, ptr}; +use core::{fmt, ptr}; use fixedbitset::FixedBitSet; use log::warn; #[cfg(feature = "trace")] use tracing::Span; use super::{ - NopWorldQuery, QueryBuilder, QueryData, QueryEntityError, QueryFilter, QueryManyIter, - QueryManyUniqueIter, QuerySingleError, ROQueryItem, + ComponentAccessKind, NopWorldQuery, QueryBuilder, QueryData, QueryEntityError, QueryFilter, + QueryManyIter, QueryManyUniqueIter, QuerySingleError, ROQueryItem, ReadOnlyQueryData, }; /// An ID for either a table or an archetype. Used for Query iteration. 
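For orientation before the `QueryState` changes below: the two structs added to par_iter.rs back `Query::par_iter_many` and `Query::par_iter_many_unique`. A rough usage sketch, assuming the system-facing methods land as documented in this diff; the `Health` component and `Selected` resource are made-up names.

```
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Health(f32);

// Hypothetical resource listing the entities to visit.
#[derive(Resource)]
struct Selected(Vec<Entity>);

// Read-only parallel iteration over only the listed entities. Entities that
// don't match the query are skipped, and the work is spread over the
// ComputeTaskPool when the `multi_threaded` feature is enabled.
fn tally_selected_health(query: Query<&Health>, selected: Res<Selected>) {
    query
        .par_iter_many(selected.0.iter().copied())
        .for_each(|health| {
            let _ = health.0; // per-item work runs on a task pool thread
        });
}
```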
@@ -52,9 +51,9 @@ pub(super) union StorageId { /// /// This data is cached between system runs, and is used to: /// - store metadata about which [`Table`] or [`Archetype`] are matched by the query. "Matched" means -/// that the query will iterate over the data in the matched table/archetype. +/// that the query will iterate over the data in the matched table/archetype. /// - cache the [`State`] needed to compute the [`Fetch`] struct used to retrieve data -/// from a specific [`Table`] or [`Archetype`] +/// from a specific [`Table`] or [`Archetype`] /// - build iterators that can iterate over the query results /// /// [`State`]: crate::query::world_query::WorldQuery::State @@ -73,6 +72,9 @@ pub struct QueryState { pub(crate) matched_archetypes: FixedBitSet, /// [`FilteredAccess`] computed by combining the `D` and `F` access. Used to check which other queries /// this query can run in parallel with. + /// Note that because we do a zero-cost reference conversion in `Query::as_readonly`, + /// the access for a read-only query may include accesses for the original mutable version, + /// but the `Query` does not have exclusive access to those components. pub(crate) component_access: FilteredAccess, // NOTE: we maintain both a bitset and a vec because iterating the vec is faster pub(super) matched_storage_ids: Vec, @@ -133,7 +135,7 @@ impl QueryState { /// `NewD` must have a subset of the access that `D` does and match the exact same archetypes/tables /// `NewF` must have a subset of the access that `F` does and match the exact same archetypes/tables pub(crate) unsafe fn as_transmuted_state< - NewD: QueryData, + NewD: ReadOnlyQueryData, NewF: QueryFilter, >( &self, @@ -258,7 +260,7 @@ impl QueryState { let mut is_dense = D::IS_DENSE && F::IS_DENSE; if let Some(default_filters) = world.get_resource::() { - default_filters.apply(&mut component_access); + default_filters.modify_access(&mut component_access); is_dense &= default_filters.is_dense(world.components()); } @@ -293,7 +295,7 @@ impl QueryState { let mut is_dense = builder.is_dense(); if let Some(default_filters) = builder.world().get_resource::() { - default_filters.apply(&mut component_access); + default_filters.modify_access(&mut component_access); is_dense &= default_filters.is_dense(builder.world().components()); } @@ -338,7 +340,10 @@ impl QueryState { /// /// This will create read-only queries, see [`Self::query_mut`] for mutable queries. pub fn query_manual<'w, 's>(&'s self, world: &'w World) -> Query<'w, 's, D::ReadOnly, F> { - // SAFETY: We have read access to the entire world, and we call `as_readonly()` so the query only performs read access. + self.validate_world(world.id()); + // SAFETY: + // - We have read access to the entire world, and we call `as_readonly()` so the query only performs read access. + // - We called `validate_world`. unsafe { self.as_readonly() .query_unchecked_manual(world.as_unsafe_world_cell_readonly()) @@ -382,13 +387,17 @@ impl QueryState { /// /// This does not check for mutable query correctness. To be safe, make sure mutable queries /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. pub unsafe fn query_unchecked_manual<'w, 's>( &'s self, world: UnsafeWorldCell<'w>, ) -> Query<'w, 's, D, F> { let last_run = world.last_change_tick(); let this_run = world.change_tick(); - // SAFETY: The caller ensured we have the correct access to the world. 
+ // SAFETY: + // - The caller ensured we have the correct access to the world. + // - The caller ensured that the world matches. unsafe { self.query_unchecked_manual_with_ticks(world, last_run, this_run) } } @@ -405,7 +414,9 @@ impl QueryState { this_run: Tick, ) -> Query<'w, 's, D, F> { self.update_archetypes_unsafe_world_cell(world); - // SAFETY: The caller ensured we have the correct access to the world. + // SAFETY: + // - The caller ensured we have the correct access to the world. + // - We called `update_archetypes_unsafe_world_cell`, which calls `validate_world`. unsafe { self.query_unchecked_manual_with_ticks(world, last_run, this_run) } } @@ -423,16 +434,17 @@ impl QueryState { /// /// This does not check for mutable query correctness. To be safe, make sure mutable queries /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. pub unsafe fn query_unchecked_manual_with_ticks<'w, 's>( &'s self, world: UnsafeWorldCell<'w>, last_run: Tick, this_run: Tick, ) -> Query<'w, 's, D, F> { - self.validate_world(world.id()); // SAFETY: // - The caller ensured we have the correct access to the world. - // - `validate_world` did not panic, so the world matches. + // - The caller ensured that the world matches. unsafe { Query::new(world, self, last_run, this_run) } } @@ -453,15 +465,16 @@ impl QueryState { pub fn is_empty(&self, world: &World, last_run: Tick, this_run: Tick) -> bool { self.validate_world(world.id()); // SAFETY: - // - We have read-only access to the entire world. - // - The world has been validated. + // - We have read access to the entire world, and `is_empty()` only performs read access. + // - We called `validate_world`. unsafe { - self.is_empty_unsafe_world_cell( + self.query_unchecked_manual_with_ticks( world.as_unsafe_world_cell_readonly(), last_run, this_run, ) } + .is_empty() } /// Returns `true` if the given [`Entity`] matches the query. @@ -469,41 +482,18 @@ impl QueryState { /// This is always guaranteed to run in `O(1)` time. #[inline] pub fn contains(&self, entity: Entity, world: &World, last_run: Tick, this_run: Tick) -> bool { - // SAFETY: NopFetch does not access any members while &self ensures no one has exclusive access - unsafe { - self.as_nop() - .get_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - entity, - last_run, - this_run, - ) - .is_ok() - } - } - - /// Checks if the query is empty for the given [`UnsafeWorldCell`]. - /// - /// # Safety - /// - /// - `world` must have permission to read any components required by this instance's `F` [`QueryFilter`]. - /// - `world` must match the one used to create this [`QueryState`]. - #[inline] - pub(crate) unsafe fn is_empty_unsafe_world_cell( - &self, - world: UnsafeWorldCell, - last_run: Tick, - this_run: Tick, - ) -> bool { + self.validate_world(world.id()); // SAFETY: - // - The caller ensures that `world` has permission to access any data used by the filter. - // - The caller ensures that the world matches. + // - We have read access to the entire world, and `is_empty()` only performs read access. + // - We called `validate_world`. unsafe { - self.as_nop() - .iter_unchecked_manual(world, last_run, this_run) - .next() - .is_none() + self.query_unchecked_manual_with_ticks( + world.as_unsafe_world_cell_readonly(), + last_run, + this_run, + ) } + .contains(entity) } /// Updates the state's internal view of the [`World`]'s archetypes. 
If this is not called before querying data, @@ -694,23 +684,22 @@ impl QueryState { access: &mut Access, ) { // As a fast path, we can iterate directly over the components involved - // if the `access` isn't inverted. - let (component_reads_and_writes, component_reads_and_writes_inverted) = - self.component_access.access.component_reads_and_writes(); - let (component_writes, component_writes_inverted) = - self.component_access.access.component_writes(); + // if the `access` is finite. + if let Ok(iter) = self.component_access.access.try_iter_component_access() { + iter.for_each(|component_access| { + if let Some(id) = archetype.get_archetype_component_id(*component_access.index()) { + match component_access { + ComponentAccessKind::Archetypal(_) => {} + ComponentAccessKind::Shared(_) => { + access.add_component_read(id); + } + ComponentAccessKind::Exclusive(_) => { + access.add_component_write(id); + } + } + } + }); - if !component_reads_and_writes_inverted && !component_writes_inverted { - component_reads_and_writes.for_each(|id| { - if let Some(id) = archetype.get_archetype_component_id(id) { - access.add_component_read(id); - } - }); - component_writes.for_each(|id| { - if let Some(id) = archetype.get_archetype_component_id(id) { - access.add_component_write(id); - } - }); return; } @@ -764,7 +753,21 @@ impl QueryState { let mut fetch_state = NewD::get_state(world.components()).expect("Could not create fetch_state, Please initialize all referenced components before transmuting."); let filter_state = NewF::get_state(world.components()).expect("Could not create filter_state, Please initialize all referenced components before transmuting."); - NewD::set_access(&mut fetch_state, &self.component_access); + fn to_readonly(mut access: FilteredAccess) -> FilteredAccess { + access.access_mut().clear_writes(); + access + } + + let self_access = if D::IS_READ_ONLY && self.component_access.access().has_any_write() { + // The current state was transmuted from a mutable + // `QueryData` to a read-only one. + // Ignore any write access in the current state. + &to_readonly(self.component_access.clone()) + } else { + &self.component_access + }; + + NewD::set_access(&mut fetch_state, self_access); NewD::update_component_access(&fetch_state, &mut component_access); let mut filter_component_access = FilteredAccess::default(); @@ -772,7 +775,7 @@ impl QueryState { component_access.extend(&filter_component_access); assert!( - component_access.is_subset(&self.component_access), + component_access.is_subset(self_access), "Transmuted state for {} attempts to access terms that are not allowed by original state {}.", core::any::type_name::<(NewD, NewF)>(), core::any::type_name::<(D, F)>() ); @@ -854,7 +857,31 @@ impl QueryState { let new_filter_state = NewF::get_state(world.components()) .expect("Could not create filter_state, Please initialize all referenced components before transmuting."); - NewD::set_access(&mut new_fetch_state, &self.component_access); + let mut joined_component_access = self.component_access.clone(); + joined_component_access.extend(&other.component_access); + + if D::IS_READ_ONLY && self.component_access.access().has_any_write() + || OtherD::IS_READ_ONLY && other.component_access.access().has_any_write() + { + // One of the input states was transmuted from a mutable + // `QueryData` to a read-only one. + // Ignore any write access in that current state. 
+ // The simplest way to do this is to clear *all* writes + // and then add back in any writes that are valid + joined_component_access.access_mut().clear_writes(); + if !D::IS_READ_ONLY { + joined_component_access + .access_mut() + .extend(self.component_access.access()); + } + if !OtherD::IS_READ_ONLY { + joined_component_access + .access_mut() + .extend(other.component_access.access()); + } + } + + NewD::set_access(&mut new_fetch_state, &joined_component_access); NewD::update_component_access(&new_fetch_state, &mut component_access); let mut new_filter_component_access = FilteredAccess::default(); @@ -862,9 +889,6 @@ impl QueryState { component_access.extend(&new_filter_component_access); - let mut joined_component_access = self.component_access.clone(); - joined_component_access.extend(&other.component_access); - assert!( component_access.is_subset(&joined_component_access), "Joined state for {} attempts to access terms that are not allowed by state {} joined with {}.", @@ -928,17 +952,8 @@ impl QueryState { &mut self, world: &'w World, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().get_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - entity, - world.last_change_tick(), - world.read_change_tick(), - ) - } + ) -> Result, QueryEntityError> { + self.query(world).get_inner(entity) } /// Returns the read-only query results for the given array of [`Entity`]. @@ -971,27 +986,53 @@ impl QueryState { /// /// let wrong_entity = Entity::from_raw(365); /// - /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::NoSuchEntity(entity, _) => entity, _ => panic!()}, wrong_entity); + /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); /// ``` #[inline] pub fn get_many<'w, const N: usize>( &mut self, world: &'w World, entities: [Entity; N], - ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError<'w>> { - self.update_archetypes(world); + ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError> { + self.query(world).get_many_inner(entities) + } - // SAFETY: - // - We have read-only access to the entire world. - // - `update_archetypes` validates that the `World` matches. - unsafe { - self.get_many_read_only_manual( - world.as_unsafe_world_cell_readonly(), - entities, - world.last_change_tick(), - world.read_change_tick(), - ) - } + /// Returns the read-only query results for the given [`UniqueEntityArray`]. + /// + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is + /// returned instead. 
+ /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&A>(); + /// + /// let component_values = query_state.get_many_unique(&world, entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(0), &A(1), &A(2)]); + /// + /// let wrong_entity = Entity::from_raw(365); + /// + /// assert_eq!(match query_state.get_many_unique(&mut world, UniqueEntityArray::from([wrong_entity])).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); + /// ``` + #[inline] + pub fn get_many_unique<'w, const N: usize>( + &mut self, + world: &'w World, + entities: UniqueEntityArray, + ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError> { + self.query(world).get_many_unique_inner(entities) } /// Gets the query result for the given [`World`] and [`Entity`]. @@ -1002,19 +1043,8 @@ impl QueryState { &mut self, world: &'w mut World, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.get_unchecked_manual( - world.as_unsafe_world_cell(), - entity, - last_change_tick, - change_tick, - ) - } + ) -> Result, QueryEntityError> { + self.query_mut(world).get_inner(entity) } /// Returns the query results for the given array of [`Entity`]. @@ -1051,7 +1081,7 @@ impl QueryState { /// let wrong_entity = Entity::from_raw(57); /// let invalid_entity = world.spawn_empty().id(); /// - /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::NoSuchEntity(entity, _) => entity, _ => panic!()}, wrong_entity); + /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); /// assert_eq!(match query_state.get_many_mut(&mut world, [invalid_entity]).unwrap_err() {QueryEntityError::QueryDoesNotMatch(entity, _) => entity, _ => panic!()}, invalid_entity); /// assert_eq!(query_state.get_many_mut(&mut world, [entities[0], entities[0]]).unwrap_err(), QueryEntityError::AliasedMutability(entities[0])); /// ``` @@ -1060,21 +1090,53 @@ impl QueryState { &mut self, world: &'w mut World, entities: [Entity; N], - ) -> Result<[D::Item<'w>; N], QueryEntityError<'w>> { - self.update_archetypes(world); + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + self.query_mut(world).get_many_mut_inner(entities) + } - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: method requires exclusive world access - // and world has been validated via update_archetypes - unsafe { - self.get_many_unchecked_manual( - world.as_unsafe_world_cell(), - entities, - last_change_tick, - change_tick, - ) - } + /// Returns the query results for the given [`UniqueEntityArray`]. + /// + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is + /// returned instead. 
+ /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&mut A>(); + /// + /// let mut mutable_component_values = query_state.get_many_unique_mut(&mut world, entity_set).unwrap(); + /// + /// for mut a in &mut mutable_component_values { + /// a.0 += 5; + /// } + /// + /// let component_values = query_state.get_many_unique(&world, entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(5), &A(6), &A(7)]); + /// + /// let wrong_entity = Entity::from_raw(57); + /// let invalid_entity = world.spawn_empty().id(); + /// + /// assert_eq!(match query_state.get_many_unique(&mut world, UniqueEntityArray::from([wrong_entity])).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); + /// assert_eq!(match query_state.get_many_unique_mut(&mut world, UniqueEntityArray::from([invalid_entity])).unwrap_err() {QueryEntityError::QueryDoesNotMatch(entity, _) => entity, _ => panic!()}, invalid_entity); + /// ``` + #[inline] + pub fn get_many_unique_mut<'w, const N: usize>( + &mut self, + world: &'w mut World, + entities: UniqueEntityArray, + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + self.query_mut(world).get_many_unique_inner(entities) } /// Gets the query result for the given [`World`] and [`Entity`]. @@ -1095,17 +1157,8 @@ impl QueryState { &self, world: &'w World, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.validate_world(world.id()); - // SAFETY: query is read only and world is validated - unsafe { - self.as_readonly().get_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - entity, - world.last_change_tick(), - world.read_change_tick(), - ) - } + ) -> Result, QueryEntityError> { + self.query_manual(world).get_inner(entity) } /// Gets the query result for the given [`World`] and [`Entity`]. @@ -1121,133 +1174,8 @@ impl QueryState { &mut self, world: UnsafeWorldCell<'w>, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.update_archetypes_unsafe_world_cell(world); - self.get_unchecked_manual(world, entity, world.last_change_tick(), world.change_tick()) - } - - /// Gets the query result for the given [`World`] and [`Entity`], where the last change and - /// the current change tick are given. - /// - /// This is always guaranteed to run in `O(1)` time. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. - /// - /// This must be called on the same `World` that the `Query` was generated from: - /// use `QueryState::validate_world` to verify this. 
- pub(crate) unsafe fn get_unchecked_manual<'w>( - &self, - world: UnsafeWorldCell<'w>, - entity: Entity, - last_run: Tick, - this_run: Tick, - ) -> Result, QueryEntityError<'w>> { - let location = world - .entities() - .get(entity) - .ok_or(QueryEntityError::NoSuchEntity( - entity, - world.entities().entity_does_not_exist_error_details(entity), - ))?; - if !self - .matched_archetypes - .contains(location.archetype_id.index()) - { - return Err(QueryEntityError::QueryDoesNotMatch(entity, world)); - } - let archetype = world - .archetypes() - .get(location.archetype_id) - .debug_checked_unwrap(); - let mut fetch = D::init_fetch(world, &self.fetch_state, last_run, this_run); - let mut filter = F::init_fetch(world, &self.filter_state, last_run, this_run); - - let table = world - .storages() - .tables - .get(location.table_id) - .debug_checked_unwrap(); - D::set_archetype(&mut fetch, &self.fetch_state, archetype, table); - F::set_archetype(&mut filter, &self.filter_state, archetype, table); - - if F::filter_fetch(&mut filter, entity, location.table_row) { - Ok(D::fetch(&mut fetch, entity, location.table_row)) - } else { - Err(QueryEntityError::QueryDoesNotMatch(entity, world)) - } - } - - /// Gets the read-only query results for the given [`World`] and array of [`Entity`], where the last change and - /// the current change tick are given. - /// - /// # Safety - /// - /// * `world` must have permission to read all of the components returned from this call. - /// No mutable references may coexist with any of the returned references. - /// * This must be called on the same `World` that the `Query` was generated from: - /// use `QueryState::validate_world` to verify this. - pub(crate) unsafe fn get_many_read_only_manual<'w, const N: usize>( - &self, - world: UnsafeWorldCell<'w>, - entities: [Entity; N], - last_run: Tick, - this_run: Tick, - ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError<'w>> { - let mut values = [(); N].map(|_| MaybeUninit::uninit()); - - for (value, entity) in core::iter::zip(&mut values, entities) { - // SAFETY: fetch is read-only and world must be validated - let item = unsafe { - self.as_readonly() - .get_unchecked_manual(world, entity, last_run, this_run)? - }; - *value = MaybeUninit::new(item); - } - - // SAFETY: Each value has been fully initialized. - Ok(values.map(|x| unsafe { x.assume_init() })) - } - - /// Gets the query results for the given [`World`] and array of [`Entity`], where the last change and - /// the current change tick are given. - /// - /// This is always guaranteed to run in `O(1)` time. - /// - /// # Safety - /// - /// This does not check for unique access to subsets of the entity-component data. - /// To be safe, make sure mutable queries have unique access to the components they query. - /// - /// This must be called on the same `World` that the `Query` was generated from: - /// use `QueryState::validate_world` to verify this. 
- pub(crate) unsafe fn get_many_unchecked_manual<'w, const N: usize>( - &self, - world: UnsafeWorldCell<'w>, - entities: [Entity; N], - last_run: Tick, - this_run: Tick, - ) -> Result<[D::Item<'w>; N], QueryEntityError<'w>> { - // Verify that all entities are unique - for i in 0..N { - for j in 0..i { - if entities[i] == entities[j] { - return Err(QueryEntityError::AliasedMutability(entities[i])); - } - } - } - - let mut values = [(); N].map(|_| MaybeUninit::uninit()); - - for (value, entity) in core::iter::zip(&mut values, entities) { - let item = self.get_unchecked_manual(world, entity, last_run, this_run)?; - *value = MaybeUninit::new(item); - } - - // SAFETY: Each value has been fully initialized. - Ok(values.map(|x| x.assume_init())) + ) -> Result, QueryEntityError> { + self.query_unchecked(world).get_inner(entity) } /// Returns an [`Iterator`] over the query results for the given [`World`]. @@ -1255,15 +1183,7 @@ impl QueryState { /// This can only be called for read-only queries, see [`Self::iter_mut`] for write-queries. #[inline] pub fn iter<'w, 's>(&'s mut self, world: &'w World) -> QueryIter<'w, 's, D::ReadOnly, F> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query(world).into_iter() } /// Returns an [`Iterator`] over the query results for the given [`World`]. @@ -1272,13 +1192,7 @@ impl QueryState { /// Iteration order is not guaranteed. #[inline] pub fn iter_mut<'w, 's>(&'s mut self, world: &'w mut World) -> QueryIter<'w, 's, D, F> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.iter_unchecked_manual(world.as_unsafe_world_cell(), last_change_tick, change_tick) - } + self.query_mut(world).into_iter() } /// Returns an [`Iterator`] over the query results for the given [`World`] without updating the query's archetypes. @@ -1290,15 +1204,7 @@ impl QueryState { /// This can only be called for read-only queries. #[inline] pub fn iter_manual<'w, 's>(&'s self, world: &'w World) -> QueryIter<'w, 's, D::ReadOnly, F> { - self.validate_world(world.id()); - // SAFETY: query is read only and world is validated - unsafe { - self.as_readonly().iter_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query_manual(world).into_iter() } /// Returns an [`Iterator`] over all possible combinations of `K` query results without repetition. @@ -1330,15 +1236,7 @@ impl QueryState { &'s mut self, world: &'w World, ) -> QueryCombinationIter<'w, 's, D::ReadOnly, F, K> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_combinations_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query(world).iter_combinations_inner() } /// Returns an [`Iterator`] over all possible combinations of `K` query results without repetition. 
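The removed `get_many_unchecked_manual` above carried the duplicate-entity check; the behavior is unchanged and now sits behind the public getters (the new `get_many_only_mut_checks_duplicates` test in query/mod.rs covers it). A small standalone reproduction of the read-only versus mutable behavior:

```
use bevy_ecs::prelude::*;

#[derive(Component, Debug, PartialEq)]
struct A(u32);

fn main() {
    let mut world = World::new();
    let e = world.spawn(A(10)).id();
    let mut state = world.query::<&mut A>();

    // Shared access tolerates a repeated entity...
    assert_eq!(state.get_many(&world, [e, e]).unwrap(), [&A(10), &A(10)]);

    // ...but mutable access must reject it to avoid aliased `&mut A`.
    assert!(state.get_many_mut(&mut world, [e, e]).is_err());
}
```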
@@ -1363,17 +1261,7 @@ impl QueryState { &'s mut self, world: &'w mut World, ) -> QueryCombinationIter<'w, 's, D, F, K> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.iter_combinations_unchecked_manual( - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.query_mut(world).iter_combinations_inner() } /// Returns an [`Iterator`] over the read-only query items generated from an [`Entity`] list. @@ -1385,21 +1273,12 @@ impl QueryState { /// /// - [`iter_many_mut`](Self::iter_many_mut) to get mutable query items. #[inline] - pub fn iter_many<'w, 's, EntityList: IntoIterator>( + pub fn iter_many<'w, 's, EntityList: IntoIterator>( &'s mut self, world: &'w World, entities: EntityList, ) -> QueryManyIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_many_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query(world).iter_many_inner(entities) } /// Returns an [`Iterator`] over the read-only query items generated from an [`Entity`] list. @@ -1417,21 +1296,12 @@ impl QueryState { /// - [`iter_many`](Self::iter_many) to update archetypes. /// - [`iter_manual`](Self::iter_manual) to iterate over all query items. #[inline] - pub fn iter_many_manual<'w, 's, EntityList: IntoIterator>( + pub fn iter_many_manual<'w, 's, EntityList: IntoIterator>( &'s self, world: &'w World, entities: EntityList, ) -> QueryManyIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.validate_world(world.id()); - // SAFETY: query is read only, world id is validated - unsafe { - self.as_readonly().iter_many_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query_manual(world).iter_many_inner(entities) } /// Returns an iterator over the query items generated from an [`Entity`] list. @@ -1439,23 +1309,12 @@ impl QueryState { /// Items are returned in the order of the list of entities. /// Entities that don't match the query are skipped. #[inline] - pub fn iter_many_mut<'w, 's, EntityList: IntoIterator>( + pub fn iter_many_mut<'w, 's, EntityList: IntoIterator>( &'s mut self, world: &'w mut World, entities: EntityList, ) -> QueryManyIter<'w, 's, D, F, EntityList::IntoIter> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: Query has unique world access. - unsafe { - self.iter_many_unchecked_manual( - entities, - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.query_mut(world).iter_many_inner(entities) } /// Returns an [`Iterator`] over the unique read-only query items generated from an [`EntitySet`]. @@ -1472,16 +1331,7 @@ impl QueryState { world: &'w World, entities: EntityList, ) -> QueryManyUniqueIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_many_unique_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query(world).iter_many_unique_inner(entities) } /// Returns an [`Iterator`] over the unique read-only query items generated from an [`EntitySet`]. 
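[Editor's note] The `iter_many` family above now delegates to `iter_many_inner` on the borrowed query. A small sketch of the read-only variant, showing the list-ordered, skip-non-matching behavior the docs describe; `Label` is illustrative.

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Label(&'static str);

fn main() {
    let mut world = World::new();
    let a = world.spawn(Label("a")).id();
    let b = world.spawn(Label("b")).id();
    let unlabeled = world.spawn_empty().id();

    let mut state = world.query::<&Label>();
    // Items come back in the order of the entity list;
    // entities that don't match the query are skipped.
    let labels: Vec<&str> = state
        .iter_many(&world, [a, unlabeled, b])
        .map(|label| label.0)
        .collect();
    assert_eq!(labels, ["a", "b"]);
}
```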
@@ -1505,16 +1355,7 @@ impl QueryState { world: &'w World, entities: EntityList, ) -> QueryManyUniqueIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.validate_world(world.id()); - // SAFETY: query is read only, world id is validated - unsafe { - self.as_readonly().iter_many_unique_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query_manual(world).iter_many_unique_inner(entities) } /// Returns an iterator over the unique query items generated from an [`EntitySet`]. @@ -1527,18 +1368,7 @@ impl QueryState { world: &'w mut World, entities: EntityList, ) -> QueryManyUniqueIter<'w, 's, D, F, EntityList::IntoIter> { - self.update_archetypes(world); - let last_change_tick = world.last_change_tick(); - let change_tick = world.change_tick(); - // SAFETY: Query has unique world access. - unsafe { - self.iter_many_unique_unchecked_manual( - entities, - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.query_mut(world).iter_many_unique_inner(entities) } /// Returns an [`Iterator`] over the query results for the given [`World`]. /// @@ -1554,8 +1384,7 @@ impl QueryState { &'s mut self, world: UnsafeWorldCell<'w>, ) -> QueryIter<'w, 's, D, F> { - self.update_archetypes_unsafe_world_cell(world); - self.iter_unchecked_manual(world, world.last_change_tick(), world.change_tick()) + self.query_unchecked(world).into_iter() } /// Returns an [`Iterator`] over all possible combinations of `K` query results for the @@ -1574,107 +1403,7 @@ impl QueryState { &'s mut self, world: UnsafeWorldCell<'w>, ) -> QueryCombinationIter<'w, 's, D, F, K> { - self.update_archetypes_unsafe_world_cell(world); - self.iter_combinations_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - } - - /// Returns an [`Iterator`] for the given [`World`], where the last change and - /// the current change tick are given. - /// - /// This iterator is always guaranteed to return results from each matching entity once and only once. - /// Iteration order is not guaranteed. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. - /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. - #[inline] - pub(crate) unsafe fn iter_unchecked_manual<'w, 's>( - &'s self, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryIter<'w, 's, D, F> { - QueryIter::new(world, self, last_run, this_run) - } - - /// Returns an [`Iterator`] for the given [`World`] and list of [`Entity`]'s, where the last change and - /// the current change tick are given. - /// - /// This iterator is always guaranteed to return results from each unique pair of matching entities. - /// Iteration order is not guaranteed. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. - /// This does not check for entity uniqueness - /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. 
- #[inline] - pub(crate) unsafe fn iter_many_unchecked_manual<'w, 's, EntityList>( - &'s self, - entities: EntityList, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryManyIter<'w, 's, D, F, EntityList::IntoIter> - where - EntityList: IntoIterator, - { - QueryManyIter::new(world, self, entities, last_run, this_run) - } - - /// Returns an [`Iterator`] for the given [`World`] and an [`EntitySet`], where the last change and - /// the current change tick are given. - /// - /// Items are returned in the order of the list of entities. - /// Entities that don't match the query are skipped. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. - /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. - #[inline] - pub(crate) unsafe fn iter_many_unique_unchecked_manual<'w, 's, EntityList: EntitySet>( - &'s self, - entities: EntityList, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryManyUniqueIter<'w, 's, D, F, EntityList::IntoIter> { - QueryManyUniqueIter::new(world, self, entities, last_run, this_run) - } - - /// Returns an [`Iterator`] over all possible combinations of `K` query results for the - /// given [`World`] without repetition. - /// This can only be called for read-only queries. - /// - /// This iterator is always guaranteed to return results from each unique pair of matching entities. - /// Iteration order is not guaranteed. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. - /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. - #[inline] - pub(crate) unsafe fn iter_combinations_unchecked_manual<'w, 's, const K: usize>( - &'s self, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryCombinationIter<'w, 's, D, F, K> { - QueryCombinationIter::new(world, self, last_run, this_run) + self.query_unchecked(world).iter_combinations_inner() } /// Returns a parallel iterator over the query results for the given [`World`]. @@ -1690,14 +1419,7 @@ impl QueryState { &'s mut self, world: &'w World, ) -> QueryParIter<'w, 's, D::ReadOnly, F> { - self.update_archetypes(world); - QueryParIter { - world: world.as_unsafe_world_cell_readonly(), - state: self.as_readonly(), - last_run: world.last_change_tick(), - this_run: world.read_change_tick(), - batching_strategy: BatchingStrategy::new(), - } + self.query(world).par_iter_inner() } /// Returns a parallel iterator over the query results for the given [`World`]. 
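[Editor's note] With the `*_unchecked_manual` iterator constructors removed above, `par_iter` is now built by `par_iter_inner` on the borrowed query. A sketch of the system-facing call path, assuming the `ComputeTaskPool` has already been initialized by the app (as it is whenever a schedule runs); the components are illustrative.

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Position { x: f32, y: f32 }

#[derive(Component)]
struct Velocity { x: f32, y: f32 }

// Parallel iteration from a system; the scheduler sets up the
// ComputeTaskPool that `par_iter` / `par_iter_mut` rely on.
fn integrate(mut query: Query<(&mut Position, &Velocity)>) {
    query.par_iter_mut().for_each(|(mut position, velocity)| {
        position.x += velocity.x;
        position.y += velocity.y;
    });
}
```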
@@ -1733,7 +1455,7 @@ impl QueryState { /// # let wrong_entity = Entity::from_raw(57); /// # let invalid_entity = world.spawn_empty().id(); /// - /// # assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::NoSuchEntity(entity, _) => entity, _ => panic!()}, wrong_entity); + /// # assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); /// assert_eq!(match query_state.get_many_mut(&mut world, [invalid_entity]).unwrap_err() {QueryEntityError::QueryDoesNotMatch(entity, _) => entity, _ => panic!()}, invalid_entity); /// # assert_eq!(query_state.get_many_mut(&mut world, [entities[0], entities[0]]).unwrap_err(), QueryEntityError::AliasedMutability(entities[0])); /// ``` @@ -1746,16 +1468,7 @@ impl QueryState { /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool #[inline] pub fn par_iter_mut<'w, 's>(&'s mut self, world: &'w mut World) -> QueryParIter<'w, 's, D, F> { - self.update_archetypes(world); - let this_run = world.change_tick(); - let last_run = world.last_change_tick(); - QueryParIter { - world: world.as_unsafe_world_cell(), - state: self, - last_run, - this_run, - batching_strategy: BatchingStrategy::new(), - } + self.query_mut(world).par_iter_inner() } /// Runs `func` on each query result in parallel for the given [`World`], where the last change and @@ -1788,7 +1501,8 @@ impl QueryState { INIT: Fn() -> T + Sync + Send + Clone, { // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter,QueryState::par_fold_init_unchecked_manual + // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter,QueryState::par_fold_init_unchecked_manual, + // QueryState::par_many_fold_init_unchecked_manual, QueryState::par_many_unique_fold_init_unchecked_manual use arrayvec::ArrayVec; bevy_tasks::ComputeTaskPool::get().scope(|scope| { @@ -1809,7 +1523,9 @@ impl QueryState { scope.spawn(async move { #[cfg(feature = "trace")] let _span = self.par_iter_span.enter(); - let mut iter = self.iter_unchecked_manual(world, last_run, this_run); + let mut iter = self + .query_unchecked_manual_with_ticks(world, last_run, this_run) + .into_iter(); let mut accum = init_accum(); for storage_id in queue { accum = iter.fold_over_storage_range(accum, &mut func, storage_id, None); @@ -1828,7 +1544,8 @@ impl QueryState { #[cfg(feature = "trace")] let _span = self.par_iter_span.enter(); let accum = init_accum(); - self.iter_unchecked_manual(world, last_run, this_run) + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .into_iter() .fold_over_storage_range(accum, &mut func, storage_id, Some(batch)); }); } @@ -1868,65 +1585,220 @@ impl QueryState { }); } + /// Runs `func` on each query result in parallel for the given [`EntitySet`], + /// where the last change and the current change tick are given. This is faster than the + /// equivalent `iter_many_unique()` method, but cannot be chained like a normal [`Iterator`]. + /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. 
+ /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + pub(crate) unsafe fn par_many_unique_fold_init_unchecked_manual<'w, T, FN, INIT, E>( + &self, + init_accum: INIT, + world: UnsafeWorldCell<'w>, + entity_list: &UniqueEntityEquivalentSlice, + batch_size: usize, + mut func: FN, + last_run: Tick, + this_run: Tick, + ) where + FN: Fn(T, D::Item<'w>) -> T + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + E: EntityEquivalent + Sync, + { + // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: + // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter,QueryState::par_fold_init_unchecked_manual + // QueryState::par_many_fold_init_unchecked_manual, QueryState::par_many_unique_fold_init_unchecked_manual + + bevy_tasks::ComputeTaskPool::get().scope(|scope| { + let chunks = entity_list.chunks_exact(batch_size); + let remainder = chunks.remainder(); + + for batch in chunks { + let mut func = func.clone(); + let init_accum = init_accum.clone(); + scope.spawn(async move { + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_unique_inner(batch) + .fold(accum, &mut func); + }); + } + + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_unique_inner(remainder) + .fold(accum, &mut func); + }); + } +} + +impl QueryState { + /// Runs `func` on each read-only query result in parallel for the given [`Entity`] list, + /// where the last change and the current change tick are given. This is faster than the equivalent + /// `iter_many()` method, but cannot be chained like a normal [`Iterator`]. + /// + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. 
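[Editor's note] The `par_many_unique_fold_init_unchecked_manual` helper added above (and its non-unique sibling just below) splits the entity list into fixed-size batches with `chunks_exact` and folds the leftover remainder in place. A simplified, single-threaded model of that batching in plain Rust; the integer "entities" and the `visit` callback are stand-ins, not Bevy APIs.

```rust
// A minimal model of the batching pattern: fixed-size chunks, then the remainder.
fn fold_in_batches<T>(items: &[T], batch_size: usize, mut visit: impl FnMut(&[T])) {
    let chunks = items.chunks_exact(batch_size);
    let remainder = chunks.remainder();
    for batch in chunks {
        // In the real code each batch is spawned onto the ComputeTaskPool.
        visit(batch);
    }
    // Mirrors the trailing `iter_many_*_inner(remainder)` fold in the helpers.
    visit(remainder);
}

fn main() {
    let pretend_entities = [1, 2, 3, 4, 5, 6, 7];
    fold_in_batches(&pretend_entities, 3, |batch| println!("batch: {batch:?}"));
    // Prints: [1, 2, 3], [4, 5, 6], then the remainder [7].
}
```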
+ /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + pub(crate) unsafe fn par_many_fold_init_unchecked_manual<'w, T, FN, INIT, E>( + &self, + init_accum: INIT, + world: UnsafeWorldCell<'w>, + entity_list: &[E], + batch_size: usize, + mut func: FN, + last_run: Tick, + this_run: Tick, + ) where + FN: Fn(T, D::Item<'w>) -> T + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + E: EntityEquivalent + Sync, + { + // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: + // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter, QueryState::par_fold_init_unchecked_manual + // QueryState::par_many_fold_init_unchecked_manual, QueryState::par_many_unique_fold_init_unchecked_manual + + bevy_tasks::ComputeTaskPool::get().scope(|scope| { + let chunks = entity_list.chunks_exact(batch_size); + let remainder = chunks.remainder(); + + for batch in chunks { + let mut func = func.clone(); + let init_accum = init_accum.clone(); + scope.spawn(async move { + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_inner(batch) + .fold(accum, &mut func); + }); + } + + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_inner(remainder) + .fold(accum, &mut func); + }); + } +} + +impl QueryState { /// Returns a single immutable query result when there is exactly one entity matching /// the query. /// /// This can only be called for read-only queries, /// see [`single_mut`](Self::single_mut) for write-queries. /// - /// # Panics - /// - /// Panics if the number of query results is not exactly one. Use - /// [`get_single`](Self::get_single) to return a `Result` instead of panicking. - #[track_caller] - #[inline] - pub fn single<'w>(&mut self, world: &'w World) -> ROQueryItem<'w, D> { - match self.get_single(world) { - Ok(items) => items, - Err(error) => panic!("Cannot get single query result: {error}"), - } - } - - /// Returns a single immutable query result when there is exactly one entity matching - /// the query. - /// - /// This can only be called for read-only queries, - /// see [`get_single_mut`](Self::get_single_mut) for write-queries. - /// /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned /// instead. + /// + /// # Example + /// + /// Sometimes, you might want to handle the error in a specific way, + /// generally by spawning the missing entity. + /// + /// ```rust + /// use bevy_ecs::prelude::*; + /// use bevy_ecs::query::QuerySingleError; + /// + /// #[derive(Component)] + /// struct A(usize); + /// + /// fn my_system(query: Query<&A>, mut commands: Commands) { + /// match query.single() { + /// Ok(a) => (), // Do something with `a` + /// Err(err) => match err { + /// QuerySingleError::NoEntities(_) => { + /// commands.spawn(A(0)); + /// } + /// QuerySingleError::MultipleEntities(_) => panic!("Multiple entities found!"), + /// }, + /// } + /// } + /// ``` + /// + /// However in most cases, this error can simply be handled with a graceful early return. 
+ /// If this is an expected failure mode, you can do this using the `let else` pattern like so: + /// ```rust + /// use bevy_ecs::prelude::*; + /// + /// #[derive(Component)] + /// struct A(usize); + /// + /// fn my_system(query: Query<&A>) { + /// let Ok(a) = query.single() else { + /// return; + /// }; + /// + /// // Do something with `a` + /// } + /// ``` + /// + /// If this is unexpected though, you should probably use the `?` operator + /// in combination with Bevy's error handling apparatus. + /// + /// ```rust + /// use bevy_ecs::prelude::*; + /// + /// #[derive(Component)] + /// struct A(usize); + /// + /// fn my_system(query: Query<&A>) -> Result { + /// let a = query.single()?; + /// + /// // Do something with `a` + /// Ok(()) + /// } + /// ``` + /// + /// This allows you to globally control how errors are handled in your application, + /// by setting up a custom error handler. + /// See the [`bevy_ecs::error`] module docs for more information! + /// Commonly, you might want to panic on an error during development, but log the error and continue + /// execution in production. + /// + /// Simply unwrapping the [`Result`] also works, but should generally be reserved for tests. + #[inline] + pub fn single<'w>(&mut self, world: &'w World) -> Result, QuerySingleError> { + self.query(world).single_inner() + } + + /// A deprecated alias for [`QueryState::single`]. + #[deprecated(since = "0.16.0", note = "Please use `single` instead.")] #[inline] pub fn get_single<'w>( &mut self, world: &'w World, ) -> Result, QuerySingleError> { - self.update_archetypes(world); - - // SAFETY: query is read only - unsafe { - self.as_readonly().get_single_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } - } - - /// Returns a single mutable query result when there is exactly one entity matching - /// the query. - /// - /// # Panics - /// - /// Panics if the number of query results is not exactly one. Use - /// [`get_single_mut`](Self::get_single_mut) to return a `Result` instead of panicking. - #[track_caller] - #[inline] - pub fn single_mut<'w>(&mut self, world: &'w mut World) -> D::Item<'w> { - // SAFETY: query has unique world access - match self.get_single_mut(world) { - Ok(items) => items, - Err(error) => panic!("Cannot get single query result: {error}"), - } + self.single(world) } /// Returns a single mutable query result when there is exactly one entity matching @@ -1934,23 +1806,25 @@ impl QueryState { /// /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned /// instead. + /// + /// # Examples + /// + /// Please see [`Query::single`] for advice on handling the error. #[inline] + pub fn single_mut<'w>( + &mut self, + world: &'w mut World, + ) -> Result, QuerySingleError> { + self.query_mut(world).single_inner() + } + + /// A deprecated alias for [`QueryState::single_mut`]. + #[deprecated(since = "0.16.0", note = "Please use `single` instead.")] pub fn get_single_mut<'w>( &mut self, world: &'w mut World, ) -> Result, QuerySingleError> { - self.update_archetypes(world); - - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.get_single_unchecked_manual( - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.single_mut(world) } /// Returns a query result when there is exactly one entity matching the query. 
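[Editor's note] Since `single` and `single_mut` now return a `Result` and the `get_single*` names are deprecated aliases, a direct call on a `QueryState` looks like the sketch below; `Player` is illustrative.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::query::QuerySingleError;

#[derive(Component)]
struct Player;

fn main() {
    let mut world = World::new();
    world.spawn(Player);

    let mut state = world.query::<(Entity, &Player)>();
    // `single` no longer panics; the caller decides what the
    // zero-or-many cases mean.
    match state.single(&world) {
        Ok((entity, _)) => println!("the player is {entity:?}"),
        Err(QuerySingleError::NoEntities(_)) => println!("no player spawned yet"),
        Err(QuerySingleError::MultipleEntities(_)) => println!("more than one player"),
    }
}
```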
@@ -1963,12 +1837,11 @@ impl QueryState { /// This does not check for mutable query correctness. To be safe, make sure mutable queries /// have unique access to the components they query. #[inline] - pub unsafe fn get_single_unchecked<'w>( + pub unsafe fn single_unchecked<'w>( &mut self, world: UnsafeWorldCell<'w>, ) -> Result, QuerySingleError> { - self.update_archetypes_unsafe_world_cell(world); - self.get_single_unchecked_manual(world, world.last_change_tick(), world.change_tick()) + self.query_unchecked(world).single_inner() } /// Returns a query result when there is exactly one entity matching the query, @@ -1981,24 +1854,20 @@ impl QueryState { /// /// This does not check for mutable query correctness. To be safe, make sure mutable queries /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. #[inline] - pub unsafe fn get_single_unchecked_manual<'w>( + pub unsafe fn single_unchecked_manual<'w>( &self, world: UnsafeWorldCell<'w>, last_run: Tick, this_run: Tick, ) -> Result, QuerySingleError> { - let mut query = self.iter_unchecked_manual(world, last_run, this_run); - let first = query.next(); - let extra = query.next().is_some(); - - match (first, extra) { - (Some(r), false) => Ok(r), - (None, _) => Err(QuerySingleError::NoEntities(core::any::type_name::())), - (Some(_), _) => Err(QuerySingleError::MultipleEntities(core::any::type_name::< - Self, - >())), - } + // SAFETY: + // - The caller ensured we have the correct access to the world. + // - The caller ensured that the world matches. + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .single_inner() } } @@ -2011,84 +1880,12 @@ impl From> for QueryState = (0..10).map(|_| world.spawn_empty().id()).collect(); - - let query_state = world.query::(); - - // These don't matter for the test - let last_change_tick = world.last_change_tick(); - let change_tick = world.change_tick(); - - // It's best to test get_many_unchecked_manual directly, - // as it is shared and unsafe - // We don't care about aliased mutability for the read-only equivalent - - // SAFETY: Query does not access world data. - assert!(unsafe { - query_state - .get_many_unchecked_manual::<10>( - world.as_unsafe_world_cell_readonly(), - entities.clone().try_into().unwrap(), - last_change_tick, - change_tick, - ) - .is_ok() - }); - - assert_eq!( - // SAFETY: Query does not access world data. - unsafe { - query_state - .get_many_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - [entities[0], entities[0]], - last_change_tick, - change_tick, - ) - .unwrap_err() - }, - QueryEntityError::AliasedMutability(entities[0]) - ); - - assert_eq!( - // SAFETY: Query does not access world data. - unsafe { - query_state - .get_many_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - [entities[0], entities[1], entities[0]], - last_change_tick, - change_tick, - ) - .unwrap_err() - }, - QueryEntityError::AliasedMutability(entities[0]) - ); - - assert_eq!( - // SAFETY: Query does not access world data. 
- unsafe { - query_state - .get_many_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - [entities[9], entities[9]], - last_change_tick, - change_tick, - ) - .unwrap_err() - }, - QueryEntityError::AliasedMutability(entities[9]) - ); - } #[test] #[should_panic] @@ -2137,7 +1934,7 @@ mod tests { let query_state = world.query::<(&A, &B)>(); let mut new_query_state = query_state.transmute::<&A>(&world); assert_eq!(new_query_state.iter(&world).len(), 1); - let a = new_query_state.single(&world); + let a = new_query_state.single(&world).unwrap(); assert_eq!(a.0, 1); } @@ -2151,7 +1948,7 @@ mod tests { let query_state = world.query_filtered::<(&A, &B), Without>(); let mut new_query_state = query_state.transmute::<&A>(&world); // even though we change the query to not have Without, we do not get the component with C. - let a = new_query_state.single(&world); + let a = new_query_state.single(&world).unwrap(); assert_eq!(a.0, 0); } @@ -2164,7 +1961,7 @@ mod tests { let q = world.query::<()>(); let mut q = q.transmute::(&world); - assert_eq!(q.single(&world), entity); + assert_eq!(q.single(&world).unwrap(), entity); } #[test] @@ -2174,7 +1971,7 @@ mod tests { let q = world.query::<&A>(); let mut new_q = q.transmute::>(&world); - assert!(new_q.single(&world).is_added()); + assert!(new_q.single(&world).unwrap().is_added()); let q = world.query::>(); let _ = q.transmute::<&A>(&world); @@ -2245,7 +2042,7 @@ mod tests { let query_state = world.query::>(); let mut new_query_state = query_state.transmute::<&A>(&world); - let x = new_query_state.single(&world); + let x = new_query_state.single(&world).unwrap(); assert_eq!(x.0, 1234); } @@ -2270,7 +2067,7 @@ mod tests { let mut query = query; // Our result is completely untyped - let entity_ref = query.single(&world); + let entity_ref = query.single(&world).unwrap(); assert_eq!(entity, entity_ref.id()); assert_eq!(0, entity_ref.get::().unwrap().0); @@ -2285,16 +2082,16 @@ mod tests { let mut query = QueryState::<(Entity, &A, Has)>::new(&mut world) .transmute_filtered::<(Entity, Has), Added>(&world); - assert_eq!((entity_a, false), query.single(&world)); + assert_eq!((entity_a, false), query.single(&world).unwrap()); world.clear_trackers(); let entity_b = world.spawn((A(0), B(0))).id(); - assert_eq!((entity_b, true), query.single(&world)); + assert_eq!((entity_b, true), query.single(&world).unwrap()); world.clear_trackers(); - assert!(query.get_single(&world).is_err()); + assert!(query.single(&world).is_err()); } #[test] @@ -2306,15 +2103,15 @@ mod tests { .transmute_filtered::>(&world); let mut change_query = QueryState::<&mut A>::new(&mut world); - assert_eq!(entity_a, detection_query.single(&world)); + assert_eq!(entity_a, detection_query.single(&world).unwrap()); world.clear_trackers(); - assert!(detection_query.get_single(&world).is_err()); + assert!(detection_query.single(&world).is_err()); - change_query.single_mut(&mut world).0 = 1; + change_query.single_mut(&mut world).unwrap().0 = 1; - assert_eq!(entity_a, detection_query.single(&world)); + assert_eq!(entity_a, detection_query.single(&world).unwrap()); } #[test] @@ -2329,6 +2126,23 @@ mod tests { let _new_query = query.transmute_filtered::>(&world); } + #[test] + #[should_panic( + expected = "Transmuted state for (&mut bevy_ecs::query::state::tests::A, ()) attempts to access terms that are not allowed by original state (&bevy_ecs::query::state::tests::A, ())." 
+ )] + fn cannot_transmute_mutable_after_readonly() { + let mut world = World::new(); + // Calling this method would mean we had aliasing queries. + fn bad(_: Query<&mut A>, _: Query<&A>) {} + world + .run_system_once(|query: Query<&mut A>| { + let mut readonly = query.as_readonly(); + let mut lens: QueryLens<&mut A> = readonly.transmute_lens(); + bad(lens.query(), query.as_readonly()); + }) + .unwrap(); + } + // Regression test for #14629 #[test] #[should_panic] @@ -2401,7 +2215,7 @@ mod tests { let query_2 = QueryState::<&B, Without>::new(&mut world); let mut new_query: QueryState = query_1.join_filtered(&world, &query_2); - assert_eq!(new_query.single(&world), entity_ab); + assert_eq!(new_query.single(&world).unwrap(), entity_ab); } #[test] @@ -2447,6 +2261,37 @@ mod tests { let _: QueryState> = query_1.join_filtered(&world, &query_2); } + #[test] + #[should_panic( + expected = "Joined state for ((&mut bevy_ecs::query::state::tests::A, &mut bevy_ecs::query::state::tests::B), ()) attempts to access terms that are not allowed by state (&bevy_ecs::query::state::tests::A, ()) joined with (&mut bevy_ecs::query::state::tests::B, ())." + )] + fn cannot_join_mutable_after_readonly() { + let mut world = World::new(); + // Calling this method would mean we had aliasing queries. + fn bad(_: Query<(&mut A, &mut B)>, _: Query<&A>) {} + world + .run_system_once(|query_a: Query<&mut A>, mut query_b: Query<&mut B>| { + let mut readonly = query_a.as_readonly(); + let mut lens: QueryLens<(&mut A, &mut B)> = readonly.join(&mut query_b); + bad(lens.query(), query_a.as_readonly()); + }) + .unwrap(); + } + + #[test] + fn join_to_filtered_entity_mut() { + let mut world = World::new(); + world.spawn((A(2), B(3))); + + let query_1 = QueryState::<&mut A>::new(&mut world); + let query_2 = QueryState::<&mut B>::new(&mut world); + let mut new_query: QueryState = query_1.join(&world, &query_2); + + let mut entity = new_query.single_mut(&mut world).unwrap(); + assert!(entity.get_mut::().is_some()); + assert!(entity.get_mut::().is_some()); + } + #[test] fn query_respects_default_filters() { let mut world = World::new(); @@ -2454,8 +2299,8 @@ mod tests { world.spawn((B(0), C(0))); world.spawn(C(0)); - let mut df = DefaultQueryFilters::default(); - df.set_disabled(world.register_component::()); + let mut df = DefaultQueryFilters::empty(); + df.register_disabling_component(world.register_component::()); world.insert_resource(df); // Without only matches the first entity @@ -2494,8 +2339,8 @@ mod tests { assert!(query.is_dense); assert_eq!(3, query.iter(&world).count()); - let mut df = DefaultQueryFilters::default(); - df.set_disabled(world.register_component::()); + let mut df = DefaultQueryFilters::empty(); + df.register_disabling_component(world.register_component::()); world.insert_resource(df); let mut query = QueryState::<()>::new(&mut world); @@ -2504,8 +2349,8 @@ mod tests { assert!(!query.is_dense); assert_eq!(1, query.iter(&world).count()); - let mut df = DefaultQueryFilters::default(); - df.set_disabled(world.register_component::()); + let mut df = DefaultQueryFilters::empty(); + df.register_disabling_component(world.register_component::
()); world.insert_resource(df); let mut query = QueryState::<()>::new(&mut world); diff --git a/crates/bevy_ecs/src/reflect/bundle.rs b/crates/bevy_ecs/src/reflect/bundle.rs index b7acf69d6a..ee02aff86e 100644 --- a/crates/bevy_ecs/src/reflect/bundle.rs +++ b/crates/bevy_ecs/src/reflect/bundle.rs @@ -11,6 +11,7 @@ use crate::{ bundle::BundleFromComponents, entity::EntityMapper, prelude::Bundle, + relationship::RelationshipHookMode, world::{EntityMut, EntityWorldMut}, }; use bevy_reflect::{ @@ -36,8 +37,13 @@ pub struct ReflectBundleFns { /// Function pointer implementing [`ReflectBundle::apply`]. pub apply: fn(EntityMut, &dyn PartialReflect, &TypeRegistry), /// Function pointer implementing [`ReflectBundle::apply_or_insert_mapped`]. - pub apply_or_insert_mapped: - fn(&mut EntityWorldMut, &dyn PartialReflect, &TypeRegistry, &mut dyn EntityMapper), + pub apply_or_insert_mapped: fn( + &mut EntityWorldMut, + &dyn PartialReflect, + &TypeRegistry, + &mut dyn EntityMapper, + RelationshipHookMode, + ), /// Function pointer implementing [`ReflectBundle::remove`]. pub remove: fn(&mut EntityWorldMut), /// Function pointer implementing [`ReflectBundle::take`]. @@ -87,8 +93,9 @@ impl ReflectBundle { bundle: &dyn PartialReflect, registry: &TypeRegistry, mapper: &mut dyn EntityMapper, + relationship_hook_mode: RelationshipHookMode, ) { - (self.0.apply_or_insert_mapped)(entity, bundle, registry, mapper); + (self.0.apply_or_insert_mapped)(entity, bundle, registry, mapper, relationship_hook_mode); } /// Removes this [`Bundle`] type from the entity. Does nothing if it doesn't exist. @@ -170,7 +177,11 @@ impl FromType for Refl } } }, - apply_or_insert_mapped: |entity, reflected_bundle, registry, mapper| { + apply_or_insert_mapped: |entity, + reflected_bundle, + registry, + mapper, + relationship_hook_mode| { if let Some(reflect_component) = registry.get_type_data::(TypeId::of::()) { @@ -179,14 +190,27 @@ impl FromType for Refl reflected_bundle, registry, mapper, + relationship_hook_mode, ); } else { match reflected_bundle.reflect_ref() { ReflectRef::Struct(bundle) => bundle.iter_fields().for_each(|field| { - apply_or_insert_field_mapped(entity, field, registry, mapper); + apply_or_insert_field_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); }), ReflectRef::Tuple(bundle) => bundle.iter_fields().for_each(|field| { - apply_or_insert_field_mapped(entity, field, registry, mapper); + apply_or_insert_field_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); }), _ => panic!( "expected bundle `{}` to be a named struct or tuple", @@ -232,6 +256,7 @@ fn apply_or_insert_field_mapped( field: &dyn PartialReflect, registry: &TypeRegistry, mapper: &mut dyn EntityMapper, + relationship_hook_mode: RelationshipHookMode, ) { let Some(type_id) = field.try_as_reflect().map(Any::type_id) else { panic!( @@ -241,9 +266,21 @@ fn apply_or_insert_field_mapped( }; if let Some(reflect_component) = registry.get_type_data::(type_id) { - reflect_component.apply_or_insert_mapped(entity, field, registry, mapper); + reflect_component.apply_or_insert_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); } else if let Some(reflect_bundle) = registry.get_type_data::(type_id) { - reflect_bundle.apply_or_insert_mapped(entity, field, registry, mapper); + reflect_bundle.apply_or_insert_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); } else { let is_component = entity.world().components().get_id(type_id).is_some(); diff --git 
a/crates/bevy_ecs/src/reflect/component.rs b/crates/bevy_ecs/src/reflect/component.rs index bffd2e9c29..893e9b13fa 100644 --- a/crates/bevy_ecs/src/reflect/component.rs +++ b/crates/bevy_ecs/src/reflect/component.rs @@ -63,6 +63,7 @@ use crate::{ component::{ComponentId, ComponentMutability}, entity::{Entity, EntityMapper}, prelude::Component, + relationship::RelationshipHookMode, world::{ unsafe_world_cell::UnsafeEntityCell, EntityMut, EntityWorldMut, FilteredEntityMut, FilteredEntityRef, World, @@ -105,8 +106,13 @@ pub struct ReflectComponentFns { /// Function pointer implementing [`ReflectComponent::apply()`]. pub apply: fn(EntityMut, &dyn PartialReflect), /// Function pointer implementing [`ReflectComponent::apply_or_insert_mapped()`]. - pub apply_or_insert_mapped: - fn(&mut EntityWorldMut, &dyn PartialReflect, &TypeRegistry, &mut dyn EntityMapper), + pub apply_or_insert_mapped: fn( + &mut EntityWorldMut, + &dyn PartialReflect, + &TypeRegistry, + &mut dyn EntityMapper, + RelationshipHookMode, + ), /// Function pointer implementing [`ReflectComponent::remove()`]. pub remove: fn(&mut EntityWorldMut), /// Function pointer implementing [`ReflectComponent::contains()`]. @@ -115,10 +121,8 @@ pub struct ReflectComponentFns { pub reflect: fn(FilteredEntityRef) -> Option<&dyn Reflect>, /// Function pointer implementing [`ReflectComponent::reflect_mut()`]. pub reflect_mut: fn(FilteredEntityMut) -> Option>, - /// Function pointer implementing [`ReflectComponent::visit_entities()`]. - pub visit_entities: fn(&dyn Reflect, &mut dyn FnMut(Entity)), - /// Function pointer implementing [`ReflectComponent::visit_entities_mut()`]. - pub visit_entities_mut: fn(&mut dyn Reflect, &mut dyn FnMut(&mut Entity)), + /// Function pointer implementing [`ReflectComponent::map_entities()`]. + pub map_entities: fn(&mut dyn Reflect, &mut dyn EntityMapper), /// Function pointer implementing [`ReflectComponent::reflect_unchecked_mut()`]. /// /// # Safety @@ -174,8 +178,9 @@ impl ReflectComponent { component: &dyn PartialReflect, registry: &TypeRegistry, map: &mut dyn EntityMapper, + relationship_hook_mode: RelationshipHookMode, ) { - (self.0.apply_or_insert_mapped)(entity, component, registry, map); + (self.0.apply_or_insert_mapped)(entity, component, registry, map, relationship_hook_mode); } /// Removes this [`Component`] type from the entity. Does nothing if it doesn't exist. @@ -284,18 +289,9 @@ impl ReflectComponent { &self.0 } - /// Calls a dynamic version of [`Component::visit_entities`]. - pub fn visit_entities(&self, component: &dyn Reflect, func: &mut dyn FnMut(Entity)) { - (self.0.visit_entities)(component, func); - } - - /// Calls a dynamic version of [`Component::visit_entities_mut`]. - pub fn visit_entities_mut( - &self, - component: &mut dyn Reflect, - func: &mut dyn FnMut(&mut Entity), - ) { - (self.0.visit_entities_mut)(component, func); + /// Calls a dynamic version of [`Component::map_entities`]. 
+ pub fn map_entities(&self, component: &mut dyn Reflect, func: &mut dyn EntityMapper) { + (self.0.map_entities)(component, func); } } @@ -320,29 +316,30 @@ impl FromType for ReflectComponent { let mut component = unsafe { entity.get_mut_assume_mutable::() }.unwrap(); component.apply(reflected_component); }, - apply_or_insert_mapped: |entity, reflected_component, registry, mapper| { - // TODO: if we can externalize this impl to cut down on monomorphization that would be great - let map_fn = move |entity: &mut Entity| { - *entity = mapper.get_mapped(*entity); - }; + apply_or_insert_mapped: |entity, + reflected_component, + registry, + mut mapper, + relationship_hook_mode| { if C::Mutability::MUTABLE { // SAFETY: guard ensures `C` is a mutable component if let Some(mut component) = unsafe { entity.get_mut_assume_mutable::() } { component.apply(reflected_component.as_partial_reflect()); - C::visit_entities_mut(&mut component, map_fn); + C::map_entities(&mut component, &mut mapper); } else { let mut component = entity.world_scope(|world| { from_reflect_with_fallback::(reflected_component, world, registry) }); - C::visit_entities_mut(&mut component, map_fn); - entity.insert(component); + C::map_entities(&mut component, &mut mapper); + entity + .insert_with_relationship_hook_mode(component, relationship_hook_mode); } } else { let mut component = entity.world_scope(|world| { from_reflect_with_fallback::(reflected_component, world, registry) }); - C::visit_entities_mut(&mut component, map_fn); - entity.insert(component); + C::map_entities(&mut component, &mut mapper); + entity.insert_with_relationship_hook_mode(component, relationship_hook_mode); } }, remove: |entity| { @@ -386,13 +383,9 @@ impl FromType for ReflectComponent { register_component: |world: &mut World| -> ComponentId { world.register_component::() }, - visit_entities: |reflect: &dyn Reflect, func: &mut dyn FnMut(Entity)| { - let component = reflect.downcast_ref::().unwrap(); - Component::visit_entities(component, func); - }, - visit_entities_mut: |reflect: &mut dyn Reflect, func: &mut dyn FnMut(&mut Entity)| { + map_entities: |reflect: &mut dyn Reflect, mut mapper: &mut dyn EntityMapper| { let component = reflect.downcast_mut::().unwrap(); - Component::visit_entities_mut(component, func); + Component::map_entities(component, &mut mapper); }, }) } diff --git a/crates/bevy_ecs/src/reflect/entity_commands.rs b/crates/bevy_ecs/src/reflect/entity_commands.rs index 9aef8105a5..20c5e16c6d 100644 --- a/crates/bevy_ecs/src/reflect/entity_commands.rs +++ b/crates/bevy_ecs/src/reflect/entity_commands.rs @@ -20,7 +20,7 @@ pub trait ReflectCommandExt { /// /// - If the entity doesn't exist. /// - If [`AppTypeRegistry`] does not have the reflection data for the given - /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). + /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). /// - If the component or bundle data is invalid. See [`PartialReflect::apply`] for further details. /// - If [`AppTypeRegistry`] is not present in the [`World`]. /// @@ -82,7 +82,7 @@ pub trait ReflectCommandExt { /// // use the insert_reflect entity command to insert that component/bundle into an entity. 
/// commands /// .spawn_empty() - /// .insert_reflect(prefab.data.clone_value()); + /// .insert_reflect(prefab.data.reflect_clone().unwrap().into_partial_reflect()); /// } /// ``` fn insert_reflect(&mut self, component: Box) -> &mut Self; @@ -212,7 +212,7 @@ impl<'w> EntityWorldMut<'w> { /// /// - If the entity has been despawned while this `EntityWorldMut` is still alive. /// - If [`AppTypeRegistry`] does not have the reflection data for the given - /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). + /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). /// - If the component or bundle data is invalid. See [`PartialReflect::apply`] for further details. /// - If [`AppTypeRegistry`] is not present in the [`World`]. /// @@ -243,7 +243,7 @@ impl<'w> EntityWorldMut<'w> { /// /// - If the entity has been despawned while this `EntityWorldMut` is still alive. /// - If the given [`Resource`] does not have the reflection data for the given - /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). + /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). /// - If the component or bundle data is invalid. See [`PartialReflect::apply`] for further details. /// - If the given [`Resource`] is not present in the [`World`]. pub fn insert_reflect_with_registry>( @@ -442,21 +442,30 @@ mod tests { let entity = commands.spawn_empty().id(); let entity2 = commands.spawn_empty().id(); + let entity3 = commands.spawn_empty().id(); let boxed_reflect_component_a = Box::new(ComponentA(916)) as Box; - let boxed_reflect_component_a_clone = boxed_reflect_component_a.clone_value(); + let boxed_reflect_component_a_clone = boxed_reflect_component_a.reflect_clone().unwrap(); + let boxed_reflect_component_a_dynamic = boxed_reflect_component_a.to_dynamic(); commands .entity(entity) .insert_reflect(boxed_reflect_component_a); commands .entity(entity2) - .insert_reflect(boxed_reflect_component_a_clone); + .insert_reflect(boxed_reflect_component_a_clone.into_partial_reflect()); + commands + .entity(entity3) + .insert_reflect(boxed_reflect_component_a_dynamic); system_state.apply(&mut world); assert_eq!( world.entity(entity).get::(), - world.entity(entity2).get::() + world.entity(entity2).get::(), + ); + assert_eq!( + world.entity(entity).get::(), + world.entity(entity3).get::(), ); } diff --git a/crates/bevy_ecs/src/reflect/mod.rs b/crates/bevy_ecs/src/reflect/mod.rs index 4d94945afc..b630f58719 100644 --- a/crates/bevy_ecs/src/reflect/mod.rs +++ b/crates/bevy_ecs/src/reflect/mod.rs @@ -17,7 +17,6 @@ mod entity_commands; mod from_world; mod map_entities; mod resource; -mod visit_entities; pub use bundle::{ReflectBundle, ReflectBundleFns}; pub use component::{ReflectComponent, ReflectComponentFns}; @@ -25,7 +24,6 @@ pub use entity_commands::ReflectCommandExt; pub use from_world::{ReflectFromWorld, ReflectFromWorldFns}; pub use map_entities::ReflectMapEntities; pub use resource::{ReflectResource, ReflectResourceFns}; -pub use visit_entities::{ReflectVisitEntities, ReflectVisitEntitiesMut}; /// A [`Resource`] storing [`TypeRegistry`] for /// type registrations relevant to a whole app. 
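[Editor's note] Mirroring the updated test above, a sketch of the two ways this PR duplicates reflected component data for `insert_reflect` now that `clone_value` is no longer used: a concrete `reflect_clone` converted into `Box<dyn PartialReflect>`, or a type-erased `to_dynamic` clone. It assumes the world holds an `AppTypeRegistry` with the (illustrative) `Score` type registered, as the surrounding docs require.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::reflect::ReflectCommandExt;
use bevy_reflect::{PartialReflect, Reflect};

#[derive(Component, Reflect)]
#[reflect(Component)]
struct Score(u32);

fn duplicate(mut commands: Commands) {
    let original: Box<dyn Reflect> = Box::new(Score(7));

    // 1. A concrete clone, converted into the `Box<dyn PartialReflect>`
    //    that `insert_reflect` expects.
    let concrete = original.reflect_clone().unwrap().into_partial_reflect();
    commands.spawn_empty().insert_reflect(concrete);

    // 2. A dynamic (type-erased) clone of the same value.
    let dynamic = original.to_dynamic();
    commands.spawn_empty().insert_reflect(dynamic);
}
```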
diff --git a/crates/bevy_ecs/src/reflect/resource.rs b/crates/bevy_ecs/src/reflect/resource.rs index 238fd5f4e7..60cf7bc609 100644 --- a/crates/bevy_ecs/src/reflect/resource.rs +++ b/crates/bevy_ecs/src/reflect/resource.rs @@ -8,7 +8,10 @@ use crate::{ change_detection::Mut, component::ComponentId, resource::Resource, - world::{unsafe_world_cell::UnsafeWorldCell, World}, + world::{ + error::ResourceFetchError, unsafe_world_cell::UnsafeWorldCell, FilteredResources, + FilteredResourcesMut, World, + }, }; use bevy_reflect::{FromReflect, FromType, PartialReflect, Reflect, TypePath, TypeRegistry}; @@ -52,7 +55,12 @@ pub struct ReflectResourceFns { /// Function pointer implementing [`ReflectResource::remove()`]. pub remove: fn(&mut World), /// Function pointer implementing [`ReflectResource::reflect()`]. - pub reflect: fn(&World) -> Option<&dyn Reflect>, + pub reflect: + for<'w> fn(FilteredResources<'w, '_>) -> Result<&'w dyn Reflect, ResourceFetchError>, + /// Function pointer implementing [`ReflectResource::reflect_mut()`]. + pub reflect_mut: for<'w> fn( + FilteredResourcesMut<'w, '_>, + ) -> Result, ResourceFetchError>, /// Function pointer implementing [`ReflectResource::reflect_unchecked_mut()`]. /// /// # Safety @@ -111,14 +119,23 @@ impl ReflectResource { } /// Gets the value of this [`Resource`] type from the world as a reflected reference. - pub fn reflect<'a>(&self, world: &'a World) -> Option<&'a dyn Reflect> { - (self.0.reflect)(world) + /// + /// Note that [`&World`](World) is a valid type for `resources`. + pub fn reflect<'w, 's>( + &self, + resources: impl Into>, + ) -> Result<&'w dyn Reflect, ResourceFetchError> { + (self.0.reflect)(resources.into()) } /// Gets the value of this [`Resource`] type from the world as a mutable reflected reference. - pub fn reflect_mut<'a>(&self, world: &'a mut World) -> Option> { - // SAFETY: unique world access - unsafe { (self.0.reflect_unchecked_mut)(world.as_unsafe_world_cell()) } + /// + /// Note that [`&mut World`](World) is a valid type for `resources`. + pub fn reflect_mut<'w, 's>( + &self, + resources: impl Into>, + ) -> Result, ResourceFetchError> { + (self.0.reflect_mut)(resources.into()) } /// # Safety @@ -212,7 +229,12 @@ impl FromType for ReflectResource { remove: |world| { world.remove_resource::(); }, - reflect: |world| world.get_resource::().map(|res| res as &dyn Reflect), + reflect: |world| world.get::().map(|res| res.into_inner() as &dyn Reflect), + reflect_mut: |world| { + world + .into_mut::() + .map(|res| res.map_unchanged(|value| value as &mut dyn Reflect)) + }, reflect_unchecked_mut: |world| { // SAFETY: all usages of `reflect_unchecked_mut` guarantee that there is either a single mutable // reference or multiple immutable ones alive at any given point diff --git a/crates/bevy_ecs/src/reflect/visit_entities.rs b/crates/bevy_ecs/src/reflect/visit_entities.rs deleted file mode 100644 index 11f02612ba..0000000000 --- a/crates/bevy_ecs/src/reflect/visit_entities.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::entity::{Entity, VisitEntities, VisitEntitiesMut}; -use bevy_reflect::{FromReflect, FromType, PartialReflect}; - -/// For a reflected value, apply an operation to all contained entities. -/// -/// See [`VisitEntities`] for more details. -#[derive(Clone)] -pub struct ReflectVisitEntities { - visit_entities: fn(&dyn PartialReflect, &mut dyn FnMut(Entity)), -} - -impl ReflectVisitEntities { - /// A general method for applying an operation to all entities in a - /// reflected component. 
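[Editor's note] A minimal sketch of the reworked `ReflectResource::reflect` from the hunk above, which now accepts anything convertible into `FilteredResources` (including `&World`) and returns a `Result` instead of an `Option`. The `Settings` resource and its manual registration are illustrative.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::reflect::ReflectResource;
use bevy_reflect::{Reflect, TypeRegistry};
use std::any::TypeId;

#[derive(Resource, Reflect, Default)]
#[reflect(Resource)]
struct Settings {
    volume: f32,
}

fn main() {
    let mut world = World::new();
    world.insert_resource(Settings { volume: 0.5 });

    let mut registry = TypeRegistry::default();
    registry.register::<Settings>();
    let reflect_resource = registry
        .get_type_data::<ReflectResource>(TypeId::of::<Settings>())
        .unwrap();

    // `&World` converts into `FilteredResources`, so it can be passed directly.
    let value: &dyn Reflect = reflect_resource.reflect(&world).unwrap();
    assert!(value.downcast_ref::<Settings>().is_some());
}
```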
- pub fn visit_entities(&self, component: &dyn PartialReflect, f: &mut dyn FnMut(Entity)) { - (self.visit_entities)(component, f); - } -} - -impl FromType for ReflectVisitEntities { - fn from_type() -> Self { - ReflectVisitEntities { - visit_entities: |component, f| { - let concrete = C::from_reflect(component).unwrap(); - concrete.visit_entities(f); - }, - } - } -} - -/// For a reflected value, apply an operation to mutable references to all -/// contained entities. -/// -/// See [`VisitEntitiesMut`] for more details. -#[derive(Clone)] -pub struct ReflectVisitEntitiesMut { - visit_entities_mut: fn(&mut dyn PartialReflect, &mut dyn FnMut(&mut Entity)), -} - -impl ReflectVisitEntitiesMut { - /// A general method for applying an operation to all entities in a - /// reflected component. - pub fn visit_entities( - &self, - component: &mut dyn PartialReflect, - f: &mut dyn FnMut(&mut Entity), - ) { - (self.visit_entities_mut)(component, f); - } -} - -impl FromType for ReflectVisitEntitiesMut { - fn from_type() -> Self { - ReflectVisitEntitiesMut { - visit_entities_mut: |component, f| { - let mut concrete = C::from_reflect(component).unwrap(); - concrete.visit_entities_mut(f); - component.apply(&concrete); - }, - } - } -} diff --git a/crates/bevy_ecs/src/relationship/mod.rs b/crates/bevy_ecs/src/relationship/mod.rs index b11e2f5bdd..9a2a2a2d5a 100644 --- a/crates/bevy_ecs/src/relationship/mod.rs +++ b/crates/bevy_ecs/src/relationship/mod.rs @@ -12,12 +12,9 @@ pub use relationship_source_collection::*; use crate::{ component::{Component, HookContext, Mutable}, - entity::{ComponentCloneCtx, Entity}, - system::{ - command::HandleError, - entity_command::{self, CommandWithEntity}, - error_handler, Commands, - }, + entity::{ComponentCloneCtx, Entity, SourceComponent}, + error::{ignore, CommandWithEntity, HandleError}, + system::entity_command::{self}, world::{DeferredWorld, EntityWorldMut}, }; use log::warn; @@ -35,12 +32,24 @@ use log::warn; /// /// [`Relationship`] and [`RelationshipTarget`] should always be derived via the [`Component`] trait to ensure the hooks are set up properly. /// +/// ## Derive +/// +/// [`Relationship`] and [`RelationshipTarget`] can only be derived for structs with a single unnamed field, single named field +/// or for named structs where one field is annotated with `#[relationship]`. +/// If there are additional fields, they must all implement [`Default`]. +/// +/// [`RelationshipTarget`] also requires that the relationship field is private to prevent direct mutation, +/// ensuring the correctness of relationships. /// ``` /// # use bevy_ecs::component::Component; /// # use bevy_ecs::entity::Entity; /// #[derive(Component)] /// #[relationship(relationship_target = Children)] -/// pub struct ChildOf(pub Entity); +/// pub struct ChildOf { +/// #[relationship] +/// pub parent: Entity, +/// internal: u8, +/// }; /// /// #[derive(Component)] /// #[relationship_target(relationship = ChildOf)] @@ -73,7 +82,24 @@ pub trait Relationship: Component + Sized { fn from(entity: Entity) -> Self; /// The `on_insert` component hook that maintains the [`Relationship`] / [`RelationshipTarget`] connection. - fn on_insert(mut world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) { + fn on_insert( + mut world: DeferredWorld, + HookContext { + entity, + caller, + relationship_hook_mode, + .. 
+ }: HookContext, + ) { + match relationship_hook_mode { + RelationshipHookMode::Run => {} + RelationshipHookMode::Skip => return, + RelationshipHookMode::RunIfNotLinked => { + if ::LINKED_SPAWN { + return; + } + } + } let target_entity = world.entity(entity).get::().unwrap().get(); if target_entity == entity { warn!( @@ -108,7 +134,23 @@ pub trait Relationship: Component + Sized { /// The `on_replace` component hook that maintains the [`Relationship`] / [`RelationshipTarget`] connection. // note: think of this as "on_drop" - fn on_replace(mut world: DeferredWorld, HookContext { entity, .. }: HookContext) { + fn on_replace( + mut world: DeferredWorld, + HookContext { + entity, + relationship_hook_mode, + .. + }: HookContext, + ) { + match relationship_hook_mode { + RelationshipHookMode::Run => {} + RelationshipHookMode::Skip => return, + RelationshipHookMode::RunIfNotLinked => { + if ::LINKED_SPAWN { + return; + } + } + } let target_entity = world.entity(entity).get::().unwrap().get(); if let Ok(mut target_entity_mut) = world.get_entity_mut(target_entity) { if let Some(mut relationship_target) = @@ -116,7 +158,7 @@ pub trait Relationship: Component + Sized { { relationship_target.collection_mut_risky().remove(entity); if relationship_target.len() == 0 { - if let Some(mut entity) = world.commands().get_entity(target_entity) { + if let Ok(mut entity) = world.commands().get_entity(target_entity) { // this "remove" operation must check emptiness because in the event that an identical // relationship is inserted on top, this despawn would result in the removal of that identical // relationship ... not what we want! @@ -143,7 +185,7 @@ pub type SourceIter<'w, R> = /// A [`Component`] containing the collection of entities that relate to this [`Entity`] via the associated `Relationship` type. /// See the [`Relationship`] documentation for more information. pub trait RelationshipTarget: Component + Sized { - /// If this is true, when despawning or cloning (when [recursion is enabled](crate::entity::EntityClonerBuilder::recursive)), the related entities targeting this entity will also be despawned or cloned. + /// If this is true, when despawning or cloning (when [linked cloning is enabled](crate::entity::EntityClonerBuilder::linked_cloning)), the related entities targeting this entity will also be despawned or cloned. /// /// For example, this is set to `true` for Bevy's built-in parent-child relation, defined by [`ChildOf`](crate::prelude::ChildOf) and [`Children`](crate::prelude::Children). /// This means that when a parent is despawned, any children targeting that parent are also despawned (and the same applies to cloning). @@ -167,40 +209,36 @@ pub trait RelationshipTarget: Component + Sized { /// /// # Warning /// This should generally not be called by user code, as modifying the internal collection could invalidate the relationship. + /// The collection should not contain duplicates. fn collection_mut_risky(&mut self) -> &mut Self::Collection; /// Creates a new [`RelationshipTarget`] from the given [`RelationshipTarget::Collection`]. /// /// # Warning /// This should generally not be called by user code, as constructing the internal collection could invalidate the relationship. + /// The collection should not contain duplicates. fn from_collection_risky(collection: Self::Collection) -> Self; /// The `on_replace` component hook that maintains the [`Relationship`] / [`RelationshipTarget`] connection. 
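[Editor's note] A sketch of a user-defined relationship pair flowing through the insert/replace hooks shown above. `Likes` / `LikedBy` are hypothetical names modeled on the documented `ChildOf` / `Children` pattern, and the explicit `flush` calls assume the hook-queued commands have not yet been applied.

```rust
use bevy_ecs::prelude::*;

// Hypothetical one-to-many relationship, mirroring ChildOf/Children.
#[derive(Component)]
#[relationship(relationship_target = LikedBy)]
struct Likes(Entity);

#[derive(Component)]
#[relationship_target(relationship = Likes)]
struct LikedBy(Vec<Entity>);

fn main() {
    let mut world = World::new();
    let cake = world.spawn_empty().id();
    let alice = world.spawn(Likes(cake)).id();
    world.flush(); // apply any commands queued by the relationship hooks

    // The `on_insert` hook keeps the target side in sync automatically.
    assert!(world.entity(cake).contains::<LikedBy>());

    // Removing the relationship runs `on_replace`, which cleans up the
    // now-empty `LikedBy` component on the former target.
    world.entity_mut(alice).remove::<Likes>();
    world.flush();
    assert!(!world.entity(cake).contains::<LikedBy>());
}
```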
// note: think of this as "on_drop" fn on_replace(mut world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) { - // NOTE: this unsafe code is an optimization. We could make this safe, but it would require - // copying the RelationshipTarget collection - // SAFETY: This only reads the Self component and queues Remove commands - unsafe { - let world = world.as_unsafe_world_cell(); - let relationship_target = world.get_entity(entity).unwrap().get::().unwrap(); - let mut commands = world.get_raw_command_queue(); - for source_entity in relationship_target.iter() { - if world.get_entity(source_entity).is_some() { - commands.push( - entity_command::remove::() - .with_entity(source_entity) - .handle_error_with(error_handler::silent()), - ); - } else { - warn!( - "{}Tried to despawn non-existent entity {}", - caller - .map(|location| format!("{location}: ")) - .unwrap_or_default(), - source_entity - ); - } + let (entities, mut commands) = world.entities_and_commands(); + let relationship_target = entities.get(entity).unwrap().get::().unwrap(); + for source_entity in relationship_target.iter() { + if entities.get(source_entity).is_ok() { + commands.queue( + entity_command::remove::() + .with_entity(source_entity) + .handle_error_with(ignore), + ); + } else { + warn!( + "{}Tried to despawn non-existent entity {}", + caller + .map(|location| format!("{location}: ")) + .unwrap_or_default(), + source_entity + ); } } } @@ -209,29 +247,23 @@ pub trait RelationshipTarget: Component + Sized { /// that entity is despawned. // note: think of this as "on_drop" fn on_despawn(mut world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) { - // NOTE: this unsafe code is an optimization. We could make this safe, but it would require - // copying the RelationshipTarget collection - // SAFETY: This only reads the Self component and queues despawn commands - unsafe { - let world = world.as_unsafe_world_cell(); - let relationship_target = world.get_entity(entity).unwrap().get::().unwrap(); - let mut commands = world.get_raw_command_queue(); - for source_entity in relationship_target.iter() { - if world.get_entity(source_entity).is_some() { - commands.push( - entity_command::despawn() - .with_entity(source_entity) - .handle_error_with(error_handler::silent()), - ); - } else { - warn!( - "{}Tried to despawn non-existent entity {}", - caller - .map(|location| format!("{location}: ")) - .unwrap_or_default(), - source_entity - ); - } + let (entities, mut commands) = world.entities_and_commands(); + let relationship_target = entities.get(entity).unwrap().get::().unwrap(); + for source_entity in relationship_target.iter() { + if entities.get(source_entity).is_ok() { + commands.queue( + entity_command::despawn() + .with_entity(source_entity) + .handle_error_with(ignore), + ); + } else { + warn!( + "{}Tried to despawn non-existent entity {}", + caller + .map(|location| format!("{location}: ")) + .unwrap_or_default(), + source_entity + ); } } } @@ -271,19 +303,33 @@ pub trait RelationshipTarget: Component + Sized { /// This will also queue up clones of the relationship sources if the [`EntityCloner`](crate::entity::EntityCloner) is configured /// to spawn recursively. 
pub fn clone_relationship_target( - _commands: &mut Commands, + source: &SourceComponent, context: &mut ComponentCloneCtx, ) { - if let Some(component) = context.read_source_component::() { - if context.is_recursive() && T::LINKED_SPAWN { + if let Some(component) = source.read::() { + let mut cloned = T::with_capacity(component.len()); + if context.linked_cloning() && T::LINKED_SPAWN { + let collection = cloned.collection_mut_risky(); for entity in component.iter() { + collection.add(entity); context.queue_entity_clone(entity); } } - context.write_target_component(T::with_capacity(component.len())); + context.write_target_component(cloned); } } +/// Configures the conditions under which the Relationship insert/replace hooks will be run. +#[derive(Copy, Clone, Debug)] +pub enum RelationshipHookMode { + /// Relationship insert/replace hooks will always run + Run, + /// Relationship insert/replace hooks will run if [`RelationshipTarget::LINKED_SPAWN`] is false + RunIfNotLinked, + /// Relationship insert/replace hooks will always be skipped + Skip, +} + #[cfg(test)] mod tests { use crate::world::World; @@ -341,4 +387,41 @@ mod tests { assert!(!world.entity(b).contains::()); assert!(!world.entity(b).contains::()); } + + #[test] + fn relationship_with_multiple_non_target_fields_compiles() { + #[derive(Component)] + #[relationship(relationship_target=Target)] + #[expect(dead_code, reason = "test struct")] + struct Source { + #[relationship] + target: Entity, + foo: u8, + bar: u8, + } + + #[derive(Component)] + #[relationship_target(relationship=Source)] + struct Target(Vec); + + // No assert necessary, looking to make sure compilation works with the macros + } + #[test] + fn relationship_target_with_multiple_non_target_fields_compiles() { + #[derive(Component)] + #[relationship(relationship_target=Target)] + struct Source(Entity); + + #[derive(Component)] + #[relationship_target(relationship=Source)] + #[expect(dead_code, reason = "test struct")] + struct Target { + #[relationship] + target: Vec, + foo: u8, + bar: u8, + } + + // No assert necessary, looking to make sure compilation works with the macros + } } diff --git a/crates/bevy_ecs/src/relationship/related_methods.rs b/crates/bevy_ecs/src/relationship/related_methods.rs index 150bd02ebd..98ef8d0832 100644 --- a/crates/bevy_ecs/src/relationship/related_methods.rs +++ b/crates/bevy_ecs/src/relationship/related_methods.rs @@ -1,16 +1,29 @@ use crate::{ bundle::Bundle, - entity::Entity, - relationship::{Relationship, RelationshipTarget}, + entity::{hash_set::EntityHashSet, Entity}, + relationship::{ + Relationship, RelationshipHookMode, RelationshipSourceCollection, RelationshipTarget, + }, system::{Commands, EntityCommands}, world::{EntityWorldMut, World}, }; -use alloc::vec::Vec; -use core::marker::PhantomData; +use bevy_platform::prelude::{Box, Vec}; +use core::{marker::PhantomData, mem}; + +use super::OrderedRelationshipSourceCollection; impl<'w> EntityWorldMut<'w> { + /// Spawns a entity related to this entity (with the `R` relationship) by taking a bundle + pub fn with_related(&mut self, bundle: impl Bundle) -> &mut Self { + let parent = self.id(); + self.world_scope(|world| { + world.spawn((bundle, R::from(parent))); + }); + self + } + /// Spawns entities related to this entity (with the `R` relationship) by taking a function that operates on a [`RelatedSpawner`]. 
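A minimal usage sketch of the spawning helpers introduced in this hunk, assuming Bevy's built-in `ChildOf`/`Children` relationship and a made-up `Label` component; it is illustrative only and not taken from the patch.

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Label(&'static str);

fn main() {
    let mut world = World::new();
    let mut parent = world.spawn(Label("parent"));

    // `with_related` spawns a single related entity from a bundle...
    parent.with_related::<ChildOf>(Label("first child"));

    // ...while the renamed `with_related_entities` takes a closure over a
    // `RelatedSpawner` to spawn several related entities at once.
    parent.with_related_entities::<ChildOf>(|spawner| {
        spawner.spawn(Label("second child"));
        spawner.spawn(Label("third child"));
    });

    let parent_id = parent.id();
    assert!(world.get::<Children>(parent_id).is_some());
}
```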
- pub fn with_related( + pub fn with_related_entities( &mut self, func: impl FnOnce(&mut RelatedSpawner), ) -> &mut Self { @@ -21,7 +34,9 @@ impl<'w> EntityWorldMut<'w> { self } - /// Relates the given entities to this entity with the relation `R` + /// Relates the given entities to this entity with the relation `R`. + /// + /// See [`add_one_related`](Self::add_one_related) if you want relate only one entity. pub fn add_related(&mut self, related: &[Entity]) -> &mut Self { let id = self.id(); self.world_scope(|world| { @@ -32,6 +47,241 @@ impl<'w> EntityWorldMut<'w> { self } + /// Relates the given entities to this entity with the relation `R`, starting at this particular index. + /// + /// If the `related` has duplicates, a related entity will take the index of its last occurrence in `related`. + /// If the indices go out of bounds, they will be clamped into bounds. + /// This will not re-order existing related entities unless they are in `related`. + /// + /// # Example + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// + /// let mut world = World::new(); + /// let e0 = world.spawn_empty().id(); + /// let e1 = world.spawn_empty().id(); + /// let e2 = world.spawn_empty().id(); + /// let e3 = world.spawn_empty().id(); + /// let e4 = world.spawn_empty().id(); + /// + /// let mut main_entity = world.spawn_empty(); + /// main_entity.add_related::(&[e0, e1, e2, e2]); + /// main_entity.insert_related::(1, &[e0, e3, e4, e4]); + /// let main_id = main_entity.id(); + /// + /// let relationship_source = main_entity.get::().unwrap().collection(); + /// assert_eq!(relationship_source, &[e1, e0, e3, e2, e4]); + /// ``` + pub fn insert_related(&mut self, index: usize, related: &[Entity]) -> &mut Self + where + ::Collection: + OrderedRelationshipSourceCollection, + { + let id = self.id(); + self.world_scope(|world| { + for (offset, related) in related.iter().enumerate() { + let index = index + offset; + if world + .get::(*related) + .is_some_and(|relationship| relationship.get() == id) + { + world + .get_mut::(id) + .expect("hooks should have added relationship target") + .collection_mut_risky() + .place(*related, index); + } else { + world.entity_mut(*related).insert(R::from(id)); + world + .get_mut::(id) + .expect("hooks should have added relationship target") + .collection_mut_risky() + .place_most_recent(index); + } + } + }); + + self + } + + /// Removes the relation `R` between this entity and the given entities. + pub fn remove_related(&mut self, related: &[Entity]) -> &mut Self { + let id = self.id(); + self.world_scope(|world| { + for related in related { + if world + .get::(*related) + .is_some_and(|relationship| relationship.get() == id) + { + world.entity_mut(*related).remove::(); + } + } + }); + + self + } + + /// Replaces all the related entities with a new set of entities. + pub fn replace_related(&mut self, related: &[Entity]) -> &mut Self { + type Collection = + <::RelationshipTarget as RelationshipTarget>::Collection; + + if related.is_empty() { + self.remove::(); + + return self; + } + + let Some(mut existing_relations) = self.get_mut::() else { + return self.add_related::(related); + }; + + // We take the collection here so we can modify it without taking the component itself (this would create archetype move). + // SAFETY: We eventually return the correctly initialized collection into the target. 
+ let mut existing_relations = mem::replace( + existing_relations.collection_mut_risky(), + Collection::::with_capacity(0), + ); + + let mut potential_relations = EntityHashSet::from_iter(related.iter().copied()); + + let id = self.id(); + self.world_scope(|world| { + for related in existing_relations.iter() { + if !potential_relations.remove(related) { + world.entity_mut(related).remove::(); + } + } + + for related in potential_relations { + // SAFETY: We'll manually be adjusting the contents of the parent to fit the final state. + world + .entity_mut(related) + .insert_with_relationship_hook_mode(R::from(id), RelationshipHookMode::Skip); + } + }); + + // SAFETY: The entities we're inserting will be the entities that were either already there or entities that we've just inserted. + existing_relations.clear(); + existing_relations.extend_from_iter(related.iter().copied()); + self.insert(R::RelationshipTarget::from_collection_risky( + existing_relations, + )); + + self + } + + /// Replaces all the related entities with a new set of entities. + /// + /// This is a more efficient version of [`Self::replace_related`] which doesn't allocate. + /// The passed-in arguments must adhere to these invariants: + /// - `entities_to_unrelate`: A slice of entities to remove from the relationship source. + /// Entities need not be related to this entity, but must not appear in `entities_to_relate` + /// - `entities_to_relate`: A slice of entities to relate to this entity. + /// This must contain all entities that will remain related (i.e. not those in `entities_to_unrelate`) plus the newly related entities. + /// - `newly_related_entities`: A subset of `entities_to_relate` containing only entities not already related to this entity. + /// - Slices **must not** contain any duplicates + /// + /// # Warning + /// + /// Violating these invariants may lead to panics, crashes or unpredictable engine behavior. + /// + /// # Panics + /// + /// Panics when debug assertions are enabled and any invariants are broken. + /// + // TODO: Consider making these iterators so users aren't required to allocate separate buffers for the different slices.
+ pub fn replace_related_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + #[cfg(debug_assertions)] + { + let entities_to_relate = EntityHashSet::from_iter(entities_to_relate.iter().copied()); + let entities_to_unrelate = + EntityHashSet::from_iter(entities_to_unrelate.iter().copied()); + let mut newly_related_entities = + EntityHashSet::from_iter(newly_related_entities.iter().copied()); + assert!( + entities_to_relate.is_disjoint(&entities_to_unrelate), + "`entities_to_relate` ({entities_to_relate:?}) shared entities with `entities_to_unrelate` ({entities_to_unrelate:?})" + ); + assert!( + newly_related_entities.is_disjoint(&entities_to_unrelate), + "`newly_related_entities` ({newly_related_entities:?}) shared entities with `entities_to_unrelate ({entities_to_unrelate:?})`" + ); + assert!( + newly_related_entities.is_subset(&entities_to_relate), + "`newly_related_entities` ({newly_related_entities:?}) wasn't a subset of `entities_to_relate` ({entities_to_relate:?})" + ); + + if let Some(target) = self.get::() { + let existing_relationships: EntityHashSet = target.collection().iter().collect(); + + assert!( + existing_relationships.is_disjoint(&newly_related_entities), + "`newly_related_entities` contains an entity that wouldn't be newly related" + ); + + newly_related_entities.extend(existing_relationships); + newly_related_entities -= &entities_to_unrelate; + } + + assert_eq!(newly_related_entities, entities_to_relate, "`entities_to_relate` ({entities_to_relate:?}) didn't contain all entities that would end up related"); + }; + + if !self.contains::() { + self.add_related::(entities_to_relate); + + return self; + }; + + let this = self.id(); + self.world_scope(|world| { + for unrelate in entities_to_unrelate { + world.entity_mut(*unrelate).remove::(); + } + + for new_relation in newly_related_entities { + // We're changing the target collection manually so don't run the insert hook + world + .entity_mut(*new_relation) + .insert_with_relationship_hook_mode(R::from(this), RelationshipHookMode::Skip); + } + }); + + if !entities_to_relate.is_empty() { + if let Some(mut target) = self.get_mut::() { + // SAFETY: The invariants expected by this function mean we'll only be inserting entities that are already related. + let collection = target.collection_mut_risky(); + collection.clear(); + + collection.extend_from_iter(entities_to_relate.iter().copied()); + } else { + let mut empty = + ::Collection::with_capacity( + entities_to_relate.len(), + ); + empty.extend_from_iter(entities_to_relate.iter().copied()); + + // SAFETY: We've just initialized this collection and we know there's no `RelationshipTarget` on `self` + self.insert(R::RelationshipTarget::from_collection_risky(empty)); + } + } + + self + } + + /// Relates the given entity to this with the relation `R`. + /// + /// See [`add_related`](Self::add_related) if you want to relate more than one entity. + pub fn add_one_related(&mut self, entity: Entity) -> &mut Self { + self.add_related::(&[entity]) + } + /// Despawns entities that relate to this one via the given [`RelationshipTarget`]. /// This entity will not be despawned. 
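A small usage sketch (not from the patch) of the two replacement APIs added here, again using the built-in `ChildOf` relationship; the second call passes arguments that satisfy the documented invariants for the state built up in the example.

```rust
use bevy_ecs::prelude::*;

fn main() {
    let mut world = World::new();
    let a = world.spawn_empty().id();
    let b = world.spawn_empty().id();
    let c = world.spawn_empty().id();

    let mut parent = world.spawn_empty();
    parent.add_related::<ChildOf>(&[a, b]);

    // Replace the full set of children in one call: `a` stays related,
    // `b` is unrelated, and `c` becomes newly related.
    parent.replace_related::<ChildOf>(&[a, c]);

    // The lower-level variant skips the internal diffing when the caller
    // already knows exactly what changed (unrelate `c`, keep `a`, newly
    // relate `b`), at the cost of upholding the invariants itself.
    parent.replace_related_with_difference::<ChildOf>(&[c], &[a, b], &[b]);
}
```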
pub fn despawn_related(&mut self) -> &mut Self { @@ -98,8 +348,15 @@ impl<'w> EntityWorldMut<'w> { } impl<'a> EntityCommands<'a> { + /// Spawns an entity related to this entity (with the `R` relationship) by taking a bundle + pub fn with_related(&mut self, bundle: impl Bundle) -> &mut Self { + let parent = self.id(); + self.commands.spawn((bundle, R::from(parent))); + self + } + /// Spawns entities related to this entity (with the `R` relationship) by taking a function that operates on a [`RelatedSpawner`]. - pub fn with_related( + pub fn with_related_entities( &mut self, func: impl FnOnce(&mut RelatedSpawnerCommands), ) -> &mut Self { @@ -108,26 +365,94 @@ impl<'a> EntityCommands<'a> { self } - /// Relates the given entities to this entity with the relation `R` + /// Relates the given entities to this entity with the relation `R`. + /// + /// See [`add_one_related`](Self::add_one_related) if you want to relate only one entity. pub fn add_related(&mut self, related: &[Entity]) -> &mut Self { - let id = self.id(); - let related = related.to_vec(); - self.commands().queue(move |world: &mut World| { - for related in related { - world.entity_mut(related).insert(R::from(id)); - } - }); - self + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.add_related::(&related); + }) + } + + /// Relates the given entities to this entity with the relation `R`, starting at this particular index. + /// + /// If the `related` has duplicates, a related entity will take the index of its last occurrence in `related`. + /// If the indices go out of bounds, they will be clamped into bounds. + /// This will not re-order existing related entities unless they are in `related`. + pub fn insert_related(&mut self, index: usize, related: &[Entity]) -> &mut Self + where + ::Collection: + OrderedRelationshipSourceCollection, + { + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.insert_related::(index, &related); + }) + } + + /// Relates the given entity to this with the relation `R`. + /// + /// See [`add_related`](Self::add_related) if you want to relate more than one entity. + pub fn add_one_related(&mut self, entity: Entity) -> &mut Self { + self.add_related::(&[entity]) + } + + /// Removes the relation `R` between this entity and the given entities. + pub fn remove_related(&mut self, related: &[Entity]) -> &mut Self { + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.remove_related::(&related); + }) + } + + /// Replaces all the related entities with the given set of new related entities. + pub fn replace_related(&mut self, related: &[Entity]) -> &mut Self { + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.replace_related::(&related); + }) + } + + /// Replaces all the related entities with a new set of entities. + /// + /// # Warning + /// + /// Failing to maintain the function's invariants may lead to erratic engine behavior including random crashes. + /// Refer to [`EntityWorldMut::replace_related_with_difference`] for a list of these invariants. + /// + /// # Panics + /// + /// Panics when debug assertions are enabled, an invariant is broken, and the command is executed.
+ pub fn replace_related_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + let entities_to_unrelate: Box<[Entity]> = entities_to_unrelate.into(); + let entities_to_relate: Box<[Entity]> = entities_to_relate.into(); + let newly_related_entities: Box<[Entity]> = newly_related_entities.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.replace_related_with_difference::( + &entities_to_unrelate, + &entities_to_relate, + &newly_related_entities, + ); + }) } /// Despawns entities that relate to this one via the given [`RelationshipTarget`]. /// This entity will not be despawned. pub fn despawn_related(&mut self) -> &mut Self { - let id = self.id(); - self.commands.queue(move |world: &mut World| { - world.entity_mut(id).despawn_related::(); - }); - self + self.queue(move |mut entity: EntityWorldMut| { + entity.despawn_related::(); + }) } /// Inserts a component or bundle of components into the entity and all related entities, @@ -141,11 +466,9 @@ impl<'a> EntityCommands<'a> { &mut self, bundle: impl Bundle + Clone, ) -> &mut Self { - let id = self.id(); - self.commands.queue(move |world: &mut World| { - world.entity_mut(id).insert_recursive::(bundle); - }); - self + self.queue(move |mut entity: EntityWorldMut| { + entity.insert_recursive::(bundle); + }) } /// Removes a component or bundle of components of type `B` from the entity and all related entities, @@ -156,11 +479,9 @@ impl<'a> EntityCommands<'a> { /// This method should only be called on relationships that form a tree-like structure. /// Any cycles will cause this method to loop infinitely. pub fn remove_recursive(&mut self) -> &mut Self { - let id = self.id(); - self.commands.queue(move |world: &mut World| { - world.entity_mut(id).remove_recursive::(); - }); - self + self.queue(move |mut entity: EntityWorldMut| { + entity.remove_recursive::(); + }) } } diff --git a/crates/bevy_ecs/src/relationship/relationship_source_collection.rs b/crates/bevy_ecs/src/relationship/relationship_source_collection.rs index 013cdd63aa..c2c9bd94d8 100644 --- a/crates/bevy_ecs/src/relationship/relationship_source_collection.rs +++ b/crates/bevy_ecs/src/relationship/relationship_source_collection.rs @@ -16,14 +16,31 @@ pub trait RelationshipSourceCollection { where Self: 'a; + /// Creates a new empty instance. + fn new() -> Self; + /// Returns an instance with the given pre-allocated entity `capacity`. + /// + /// Some collections will ignore the provided `capacity` and return a default instance. fn with_capacity(capacity: usize) -> Self; + /// Reserves capacity for at least `additional` more entities to be inserted. + /// + /// Not all collections support this operation, in which case it is a no-op. + fn reserve(&mut self, additional: usize); + /// Adds the given `entity` to the collection. - fn add(&mut self, entity: Entity); + /// + /// Returns whether the entity was added to the collection. + /// Mainly useful when dealing with collections that don't allow + /// multiple instances of the same entity ([`EntityHashSet`]). + fn add(&mut self, entity: Entity) -> bool; /// Removes the given `entity` from the collection. - fn remove(&mut self, entity: Entity); + /// + /// Returns whether the collection actually contained + /// the entity. + fn remove(&mut self, entity: Entity) -> bool; /// Iterates all entities in the collection. 
fn iter(&self) -> Self::SourceIter<'_>; @@ -31,28 +48,112 @@ pub trait RelationshipSourceCollection { /// Returns the current length of the collection. fn len(&self) -> usize; + /// Clears the collection. + fn clear(&mut self); + + /// Attempts to save memory by shrinking the capacity to fit the current length. + /// + /// This operation is a no-op for collections that do not support it. + fn shrink_to_fit(&mut self); + /// Returns true if the collection contains no entities. #[inline] fn is_empty(&self) -> bool { self.len() == 0 } + + /// Adds multiple entities to the collection at once. + /// + /// May be faster than repeatedly calling [`Self::add`]. + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + // The method is not named `extend` to avoid conflicting with `Extend::extend`, which is in the Rust prelude and + // would always conflict with it. + for entity in entities { + self.add(entity); + } + } +} + +/// This trait signals that a [`RelationshipSourceCollection`] is ordered. +pub trait OrderedRelationshipSourceCollection: RelationshipSourceCollection { + /// Inserts the entity at a specific index. + /// If the index is too large, the entity will be added to the end of the collection. + fn insert(&mut self, index: usize, entity: Entity); + /// Removes the entity at the specified index if it exists. + fn remove_at(&mut self, index: usize) -> Option; + /// Inserts the entity at a specific index. + /// This will never reorder other entities. + /// If the index is too large, the entity will be added to the end of the collection. + fn insert_stable(&mut self, index: usize, entity: Entity); + /// Removes the entity at the specified index if it exists. + /// This will never reorder other entities. + fn remove_at_stable(&mut self, index: usize) -> Option; + /// Sorts the source collection. + fn sort(&mut self); + /// Inserts the entity at the proper place to maintain sorting. + fn insert_sorted(&mut self, entity: Entity); + + /// This places the most recently added entity at the particular index. + fn place_most_recent(&mut self, index: usize); + + /// This places the given entity at the particular index. + /// This will do nothing if the entity is not in the collection. + /// If the index is out of bounds, this will put the entity at the end. + fn place(&mut self, entity: Entity, index: usize); + + /// Adds the entity at index 0. + fn push_front(&mut self, entity: Entity) { + self.insert(0, entity); + } + + /// Adds the entity to the back of the collection. + fn push_back(&mut self, entity: Entity) { + self.insert(usize::MAX, entity); + } + + /// Removes the first entity. + fn pop_front(&mut self) -> Option { + self.remove_at(0) + } + + /// Removes the last entity.
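A quick illustration (not from the patch) of the new boolean return values and one of the ordered-collection helpers. The import paths for `EntityHashSet` and the two collection traits are assumptions about the crate's re-exports; the generic helper exists only so the calls resolve to the trait methods rather than any inherent `Vec`/`HashSet` methods of the same name.

```rust
use bevy_ecs::entity::{Entity, EntityHashSet};
use bevy_ecs::prelude::*;
use bevy_ecs::relationship::{
    OrderedRelationshipSourceCollection, RelationshipSourceCollection,
};

// Call `add` twice through the trait so both results are observable.
fn add_twice<C: RelationshipSourceCollection>(collection: &mut C, entity: Entity) -> (bool, bool) {
    (collection.add(entity), collection.add(entity))
}

fn main() {
    let mut world = World::new();
    let a = world.spawn_empty().id();
    let b = world.spawn_empty().id();

    // `Vec` keeps duplicates, so both insertions report success...
    assert_eq!(add_twice(&mut Vec::<Entity>::new(), a), (true, true));
    // ...while `EntityHashSet` rejects the second one.
    assert_eq!(add_twice(&mut EntityHashSet::new(), a), (true, false));

    // The ordered extension adds positional edits for `Vec`-backed collections.
    let mut ordered = vec![a];
    ordered.push_front(b);
    assert_eq!(ordered, vec![b, a]);
}
```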
+ fn pop_back(&mut self) -> Option { + if self.is_empty() { + None + } else { + self.remove_at(self.len() - 1) + } + } } impl RelationshipSourceCollection for Vec { type SourceIter<'a> = core::iter::Copied>; + fn new() -> Self { + Vec::new() + } + + fn reserve(&mut self, additional: usize) { + Vec::reserve(self, additional); + } + fn with_capacity(capacity: usize) -> Self { Vec::with_capacity(capacity) } - fn add(&mut self, entity: Entity) { + fn add(&mut self, entity: Entity) -> bool { Vec::push(self, entity); + + true } - fn remove(&mut self, entity: Entity) { + fn remove(&mut self, entity: Entity) -> bool { if let Some(index) = <[Entity]>::iter(self).position(|e| *e == entity) { Vec::remove(self, index); + return true; } + + false } fn iter(&self) -> Self::SourceIter<'_> { @@ -62,23 +163,94 @@ impl RelationshipSourceCollection for Vec { fn len(&self) -> usize { Vec::len(self) } + + fn clear(&mut self) { + self.clear(); + } + + fn shrink_to_fit(&mut self) { + Vec::shrink_to_fit(self); + } + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + self.extend(entities); + } +} + +impl OrderedRelationshipSourceCollection for Vec { + fn insert(&mut self, index: usize, entity: Entity) { + self.push(entity); + let len = self.len(); + if index < len { + self.swap(index, len - 1); + } + } + + fn remove_at(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.swap_remove(index)) + } + + fn insert_stable(&mut self, index: usize, entity: Entity) { + if index < self.len() { + Vec::insert(self, index, entity); + } else { + self.push(entity); + } + } + + fn remove_at_stable(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.remove(index)) + } + + fn sort(&mut self) { + self.sort_unstable(); + } + + fn insert_sorted(&mut self, entity: Entity) { + let index = self.partition_point(|e| e <= &entity); + self.insert_stable(index, entity); + } + + fn place_most_recent(&mut self, index: usize) { + if let Some(entity) = self.pop() { + let index = index.min(self.len().saturating_sub(1)); + self.insert(index, entity); + } + } + + fn place(&mut self, entity: Entity, index: usize) { + if let Some(current) = <[Entity]>::iter(self).position(|e| *e == entity) { + // The len is at least 1, so the subtraction is safe. 
+ let index = index.min(self.len().saturating_sub(1)); + Vec::remove(self, current); + self.insert(index, entity); + }; + } } impl RelationshipSourceCollection for EntityHashSet { type SourceIter<'a> = core::iter::Copied>; + fn new() -> Self { + EntityHashSet::new() + } + + fn reserve(&mut self, additional: usize) { + self.0.reserve(additional); + } + fn with_capacity(capacity: usize) -> Self { EntityHashSet::with_capacity(capacity) } - fn add(&mut self, entity: Entity) { - self.insert(entity); + fn add(&mut self, entity: Entity) -> bool { + self.insert(entity) } - fn remove(&mut self, entity: Entity) { + fn remove(&mut self, entity: Entity) -> bool { // We need to call the remove method on the underlying hash set, // which takes its argument by reference - self.0.remove(&entity); + self.0.remove(&entity) } fn iter(&self) -> Self::SourceIter<'_> { @@ -88,23 +260,48 @@ impl RelationshipSourceCollection for EntityHashSet { fn len(&self) -> usize { self.len() } + + fn clear(&mut self) { + self.0.clear(); + } + + fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit(); + } + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + self.extend(entities); + } } impl RelationshipSourceCollection for SmallVec<[Entity; N]> { type SourceIter<'a> = core::iter::Copied>; + fn new() -> Self { + SmallVec::new() + } + + fn reserve(&mut self, additional: usize) { + SmallVec::reserve(self, additional); + } + fn with_capacity(capacity: usize) -> Self { SmallVec::with_capacity(capacity) } - fn add(&mut self, entity: Entity) { + fn add(&mut self, entity: Entity) -> bool { SmallVec::push(self, entity); + + true } - fn remove(&mut self, entity: Entity) { + fn remove(&mut self, entity: Entity) -> bool { if let Some(index) = <[Entity]>::iter(self).position(|e| *e == entity) { SmallVec::remove(self, index); + return true; } + + false } fn iter(&self) -> Self::SourceIter<'_> { @@ -114,6 +311,138 @@ impl RelationshipSourceCollection for SmallVec<[Entity; N]> { fn len(&self) -> usize { SmallVec::len(self) } + + fn clear(&mut self) { + self.clear(); + } + + fn shrink_to_fit(&mut self) { + SmallVec::shrink_to_fit(self); + } + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + self.extend(entities); + } +} + +impl RelationshipSourceCollection for Entity { + type SourceIter<'a> = core::option::IntoIter; + + fn new() -> Self { + Entity::PLACEHOLDER + } + + fn reserve(&mut self, _: usize) {} + + fn with_capacity(_capacity: usize) -> Self { + Self::new() + } + + fn add(&mut self, entity: Entity) -> bool { + assert_eq!( + *self, + Entity::PLACEHOLDER, + "Entity {entity} attempted to target an entity with a one-to-one relationship, but it is already targeted by {}. You must remove the original relationship first.", + *self + ); + *self = entity; + + true + } + + fn remove(&mut self, entity: Entity) -> bool { + if *self == entity { + *self = Entity::PLACEHOLDER; + + return true; + } + + false + } + + fn iter(&self) -> Self::SourceIter<'_> { + if *self == Entity::PLACEHOLDER { + None.into_iter() + } else { + Some(*self).into_iter() + } + } + + fn len(&self) -> usize { + if *self == Entity::PLACEHOLDER { + return 0; + } + 1 + } + + fn clear(&mut self) { + *self = Entity::PLACEHOLDER; + } + + fn shrink_to_fit(&mut self) {} + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + for entity in entities { + assert_eq!( + *self, + Entity::PLACEHOLDER, + "Entity {entity} attempted to target an entity with a one-to-one relationship, but it is already targeted by {}. 
You must remove the original relationship first.", + *self + ); + *self = entity; + } + } +} + +impl OrderedRelationshipSourceCollection for SmallVec<[Entity; N]> { + fn insert(&mut self, index: usize, entity: Entity) { + self.push(entity); + let len = self.len(); + if index < len { + self.swap(index, len - 1); + } + } + + fn remove_at(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.swap_remove(index)) + } + + fn insert_stable(&mut self, index: usize, entity: Entity) { + if index < self.len() { + SmallVec::<[Entity; N]>::insert(self, index, entity); + } else { + self.push(entity); + } + } + + fn remove_at_stable(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.remove(index)) + } + + fn sort(&mut self) { + self.sort_unstable(); + } + + fn insert_sorted(&mut self, entity: Entity) { + let index = self.partition_point(|e| e <= &entity); + self.insert_stable(index, entity); + } + + fn place_most_recent(&mut self, index: usize) { + if let Some(entity) = self.pop() { + let index = index.min(self.len() - 1); + self.insert(index, entity); + } + } + + fn place(&mut self, entity: Entity, index: usize) { + if let Some(current) = <[Entity]>::iter(self).position(|e| *e == entity) { + // The len is at least 1, so the subtraction is safe. + let index = index.min(self.len() - 1); + SmallVec::<[Entity; N]>::remove(self, current); + self.insert(index, entity); + }; + } } #[cfg(test)] @@ -143,27 +472,6 @@ mod tests { assert_eq!(collection, &alloc::vec!(a)); } - #[test] - fn entity_hash_set_relationship_source_collection() { - #[derive(Component)] - #[relationship(relationship_target = RelTarget)] - struct Rel(Entity); - - #[derive(Component)] - #[relationship_target(relationship = Rel, linked_spawn)] - struct RelTarget(EntityHashSet); - - let mut world = World::new(); - let a = world.spawn_empty().id(); - let b = world.spawn_empty().id(); - - world.entity_mut(a).insert(Rel(b)); - - let rel_target = world.get::(b).unwrap(); - let collection = rel_target.collection(); - assert_eq!(collection, &EntityHashSet::from([a])); - } - #[test] fn smallvec_relationship_source_collection() { #[derive(Component)] @@ -184,4 +492,96 @@ mod tests { let collection = rel_target.collection(); assert_eq!(collection, &SmallVec::from_buf([a])); } + + #[test] + fn entity_relationship_source_collection() { + #[derive(Component)] + #[relationship(relationship_target = RelTarget)] + struct Rel(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Rel)] + struct RelTarget(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Rel(b)); + + let rel_target = world.get::(b).unwrap(); + let collection = rel_target.collection(); + assert_eq!(collection, &a); + } + + #[test] + fn one_to_one_relationships() { + #[derive(Component)] + #[relationship(relationship_target = Below)] + struct Above(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Above)] + struct Below(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Above(b)); + assert_eq!(a, world.get::(b).unwrap().0); + + // Verify removing target removes relationship + world.entity_mut(b).remove::(); + assert!(world.get::(a).is_none()); + + // Verify removing relationship removes target + world.entity_mut(a).insert(Above(b)); + world.entity_mut(a).remove::(); + assert!(world.get::(b).is_none()); + + // Actually - a is 
above c now! Verify relationship was updated correctly + let c = world.spawn_empty().id(); + world.entity_mut(a).insert(Above(c)); + assert!(world.get::(b).is_none()); + assert_eq!(a, world.get::(c).unwrap().0); + } + + #[test] + #[should_panic] + fn one_to_one_relationship_shared_target() { + #[derive(Component)] + #[relationship(relationship_target = Below)] + struct Above(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Above)] + struct Below(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + let c = world.spawn_empty().id(); + + world.entity_mut(a).insert(Above(c)); + world.entity_mut(b).insert(Above(c)); + } + + #[test] + fn one_to_one_relationship_reinsert() { + #[derive(Component)] + #[relationship(relationship_target = Below)] + struct Above(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Above)] + struct Below(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Above(b)); + world.entity_mut(a).insert(Above(b)); + } } diff --git a/crates/bevy_ecs/src/removal_detection.rs b/crates/bevy_ecs/src/removal_detection.rs index a1480a1923..64cc63a7ce 100644 --- a/crates/bevy_ecs/src/removal_detection.rs +++ b/crates/bevy_ecs/src/removal_detection.rs @@ -26,7 +26,7 @@ use core::{ /// Internally, `RemovedComponents` uses these as an `Events`. #[derive(Event, Debug, Clone, Into)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] -#[cfg_attr(feature = "bevy_reflect", reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", reflect(Debug, Clone))] pub struct RemovedComponentEntity(Entity); /// Wrapper around a [`EventCursor`] so that we diff --git a/crates/bevy_ecs/src/result.rs b/crates/bevy_ecs/src/result.rs deleted file mode 100644 index 77e7fc0074..0000000000 --- a/crates/bevy_ecs/src/result.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! Contains error and result helpers for use in fallible systems. - -use alloc::boxed::Box; - -/// A dynamic error type for use in fallible systems. -pub type Error = Box; - -/// A result type for use in fallible systems. 
-pub type Result = core::result::Result; diff --git a/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs b/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs index 8ad4725d86..dda6d604a7 100644 --- a/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs +++ b/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs @@ -1,6 +1,6 @@ use alloc::{boxed::Box, collections::BTreeSet, vec::Vec}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use crate::system::IntoSystem; use crate::world::World; @@ -80,41 +80,132 @@ impl ScheduleBuildPass for AutoInsertApplyDeferredPass { let mut sync_point_graph = dependency_flattened.clone(); let topo = graph.topsort_graph(dependency_flattened, ReportCycles::Dependency)?; - // calculate the number of sync points each sync point is from the beginning of the graph - // use the same sync point if the distance is the same - let mut distances: HashMap> = + fn set_has_conditions(graph: &ScheduleGraph, node: NodeId) -> bool { + !graph.set_conditions_at(node).is_empty() + || graph + .hierarchy() + .graph() + .edges_directed(node, Direction::Incoming) + .any(|(parent, _)| set_has_conditions(graph, parent)) + } + + fn system_has_conditions(graph: &ScheduleGraph, node: NodeId) -> bool { + assert!(node.is_system()); + !graph.system_conditions[node.index()].is_empty() + || graph + .hierarchy() + .graph() + .edges_directed(node, Direction::Incoming) + .any(|(parent, _)| set_has_conditions(graph, parent)) + } + + let mut system_has_conditions_cache = HashMap::::default(); + let mut is_valid_explicit_sync_point = |system: NodeId| { + let index = system.index(); + is_apply_deferred(graph.systems[index].get().unwrap()) + && !*system_has_conditions_cache + .entry(index) + .or_insert_with(|| system_has_conditions(graph, system)) + }; + + // Calculate the distance for each node. + // The "distance" is the number of sync points between a node and the beginning of the graph. + // Also store if a preceding edge would have added a sync point but was ignored to add it at + // a later edge that is not ignored. + let mut distances_and_pending_sync: HashMap = HashMap::with_capacity_and_hasher(topo.len(), Default::default()); + + // Keep track of any explicit sync nodes for a specific distance. + let mut distance_to_explicit_sync_node: HashMap = HashMap::default(); + + // Determine the distance for every node and collect the explicit sync points. for node in &topo { - let add_sync_after = graph.systems[node.index()].get().unwrap().has_deferred(); + let (node_distance, mut node_needs_sync) = distances_and_pending_sync + .get(&node.index()) + .copied() + .unwrap_or_default(); + + if is_valid_explicit_sync_point(*node) { + // The distance of this sync point does not change anymore as the iteration order + // makes sure that this node is no unvisited target of another node. + // Because of this, the sync point can be stored for this distance to be reused as + // automatically added sync points later. + distance_to_explicit_sync_node.insert(node_distance, *node); + + // This node just did a sync, so the only reason to do another sync is if one was + // explicitly scheduled afterwards. + node_needs_sync = false; + } else if !node_needs_sync { + // No previous node has postponed sync points to add so check if the system itself + // has deferred params that require a sync point to apply them. 
+ node_needs_sync = graph.systems[node.index()].get().unwrap().has_deferred(); + } for target in dependency_flattened.neighbors_directed(*node, Direction::Outgoing) { - let add_sync_on_edge = add_sync_after - && !is_apply_deferred(graph.systems[target.index()].get().unwrap()) - && !self.no_sync_edges.contains(&(*node, target)); + let (target_distance, target_pending_sync) = distances_and_pending_sync + .entry(target.index()) + .or_default(); - let weight = if add_sync_on_edge { 1 } else { 0 }; - - let distance = distances - .get(&target.index()) - .unwrap_or(&None) - .or(Some(0)) - .map(|distance| { - distance.max( - distances.get(&node.index()).unwrap_or(&None).unwrap_or(0) + weight, - ) - }); - - distances.insert(target.index(), distance); - - if add_sync_on_edge { - let sync_point = - self.get_sync_point(graph, distances[&target.index()].unwrap()); - sync_point_graph.add_edge(*node, sync_point); - sync_point_graph.add_edge(sync_point, target); - - // edge is now redundant - sync_point_graph.remove_edge(*node, target); + let mut edge_needs_sync = node_needs_sync; + if node_needs_sync + && !graph.systems[target.index()].get().unwrap().is_exclusive() + && self.no_sync_edges.contains(&(*node, target)) + { + // The node has deferred params to apply, but this edge is ignoring sync points. + // Mark the target as 'delaying' those commands to a future edge and the current + // edge as not needing a sync point. + *target_pending_sync = true; + edge_needs_sync = false; } + + let mut weight = 0; + if edge_needs_sync || is_valid_explicit_sync_point(target) { + // The target distance grows if a sync point is added between it and the node. + // Also raise the distance if the target is a sync point itself so it then again + // raises the distance of following nodes as that is what the distance is about. + weight = 1; + } + + // The target cannot have fewer sync points in front of it than the preceding node. + *target_distance = (node_distance + weight).max(*target_distance); + } + } + + // Find any edges which have a different number of sync points between them and make sure + // there is a sync point between them. + for node in &topo { + let (node_distance, _) = distances_and_pending_sync + .get(&node.index()) + .copied() + .unwrap_or_default(); + + for target in dependency_flattened.neighbors_directed(*node, Direction::Outgoing) { + let (target_distance, _) = distances_and_pending_sync + .get(&target.index()) + .copied() + .unwrap_or_default(); + + if node_distance == target_distance { + // These nodes are the same distance, so they don't need an edge between them. + continue; + } + + if is_apply_deferred(graph.systems[target.index()].get().unwrap()) { + // We don't need to insert a sync point since ApplyDeferred is a sync point + // already! + continue; + } + + let sync_point = distance_to_explicit_sync_node + .get(&target_distance) + .copied() + .unwrap_or_else(|| self.get_sync_point(graph, target_distance)); + + sync_point_graph.add_edge(*node, sync_point); + sync_point_graph.add_edge(sync_point, target); + + // The edge without the sync point is now redundant. 
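To make the reworked pass concrete, here is a usage-level sketch, assuming the public `Schedule`/`ApplyDeferred` API and not taken from the patch: with the distance bookkeeping above, an explicitly scheduled `ApplyDeferred` at a given distance is reused instead of a second automatic sync point being inserted at the same distance.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::schedule::ApplyDeferred;

fn spawn_things(mut commands: Commands) {
    commands.spawn_empty();
}

fn count_things(query: Query<Entity>) {
    let _ = query.iter().count();
}

fn main() {
    let mut world = World::new();
    let mut schedule = Schedule::default();

    // `spawn_things` has deferred commands, so chaining would normally insert
    // an automatic sync point before `count_things`. Scheduling `ApplyDeferred`
    // explicitly between them lets the pass reuse it instead of adding another.
    schedule.add_systems((spawn_things, ApplyDeferred, count_things).chain());
    schedule.run(&mut world);
}
```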
+ sync_point_graph.remove_edge(*node, target); } } diff --git a/crates/bevy_ecs/src/schedule/condition.rs b/crates/bevy_ecs/src/schedule/condition.rs index 1bd0e9f4da..a85a8c6fa4 100644 --- a/crates/bevy_ecs/src/schedule/condition.rs +++ b/crates/bevy_ecs/src/schedule/condition.rs @@ -1267,7 +1267,7 @@ mod tests { use crate::{ change_detection::ResMut, component::Component, - schedule::{IntoSystemConfigs, Schedule}, + schedule::{IntoScheduleConfigs, Schedule}, system::Local, world::World, }; diff --git a/crates/bevy_ecs/src/schedule/config.rs b/crates/bevy_ecs/src/schedule/config.rs index 898cf67424..b98205e32b 100644 --- a/crates/bevy_ecs/src/schedule/config.rs +++ b/crates/bevy_ecs/src/schedule/config.rs @@ -2,7 +2,8 @@ use alloc::{boxed::Box, vec, vec::Vec}; use variadics_please::all_tuples; use crate::{ - result::Result, + error::Result, + never::Never, schedule::{ auto_insert_apply_deferred::IgnoreDeferred, condition::{BoxedCondition, Condition}, @@ -36,61 +37,87 @@ fn ambiguous_with(graph_info: &mut GraphInfo, set: InternedSystemSet) { } } +/// Stores data to differentiate different schedulable structs. +pub trait Schedulable { + /// Additional data used to configure independent scheduling. Stored in [`ScheduleConfig`]. + type Metadata; + /// Additional data used to configure a schedulable group. Stored in [`ScheduleConfigs`]. + type GroupMetadata; + + /// Initializes a configuration from this node. + fn into_config(self) -> ScheduleConfig + where + Self: Sized; +} + +impl Schedulable for ScheduleSystem { + type Metadata = GraphInfo; + type GroupMetadata = Chain; + + fn into_config(self) -> ScheduleConfig { + let sets = self.default_system_sets().clone(); + ScheduleConfig { + node: self, + metadata: GraphInfo { + hierarchy: sets, + ..Default::default() + }, + conditions: Vec::new(), + } + } +} + +impl Schedulable for InternedSystemSet { + type Metadata = GraphInfo; + type GroupMetadata = Chain; + + fn into_config(self) -> ScheduleConfig { + assert!( + self.system_type().is_none(), + "configuring system type sets is not allowed" + ); + + ScheduleConfig { + node: self, + metadata: GraphInfo::default(), + conditions: Vec::new(), + } + } +} + /// Stores configuration for a single generic node (a system or a system set) /// /// The configuration includes the node itself, scheduling metadata /// (hierarchy: in which sets is the node contained, /// dependencies: before/after which other nodes should this node run) /// and the run conditions associated with this node. -pub struct NodeConfig { +pub struct ScheduleConfig { pub(crate) node: T, - /// Hierarchy and dependency metadata for this node - pub(crate) graph_info: GraphInfo, + pub(crate) metadata: T::Metadata, pub(crate) conditions: Vec, } -/// Stores configuration for a single system. -pub type SystemConfig = NodeConfig; - -/// A collections of generic [`NodeConfig`]s. -pub enum NodeConfigs { - /// Configuration for a single node. - NodeConfig(NodeConfig), +/// Single or nested configurations for [`Schedulable`]s. +pub enum ScheduleConfigs { + /// Configuration for a single [`Schedulable`]. + ScheduleConfig(ScheduleConfig), /// Configuration for a tuple of nested `Configs` instances. Configs { /// Configuration for each element of the tuple. - configs: Vec>, + configs: Vec>, /// Run conditions applied to everything in the tuple. collective_conditions: Vec, - /// See [`Chain`] for usage. - chained: Chain, + /// Metadata to be applied to all elements in the tuple. + metadata: T::GroupMetadata, }, } -/// A collection of [`SystemConfig`]. 
-pub type SystemConfigs = NodeConfigs; - -impl SystemConfigs { - fn new_system(system: ScheduleSystem) -> Self { - // include system in its default sets - let sets = system.default_system_sets().into_iter().collect(); - Self::NodeConfig(SystemConfig { - node: system, - graph_info: GraphInfo { - hierarchy: sets, - ..Default::default() - }, - conditions: Vec::new(), - }) - } -} - -impl NodeConfigs { +impl> ScheduleConfigs { /// Adds a new boxed system set to the systems. pub fn in_set_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { - config.graph_info.hierarchy.push(set); + Self::ScheduleConfig(config) => { + config.metadata.hierarchy.push(set); } Self::Configs { configs, .. } => { for config in configs { @@ -102,9 +129,9 @@ impl NodeConfigs { fn before_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies .push(Dependency::new(DependencyKind::Before, set)); } @@ -118,9 +145,9 @@ impl NodeConfigs { fn after_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies .push(Dependency::new(DependencyKind::After, set)); } @@ -134,9 +161,9 @@ impl NodeConfigs { fn before_ignore_deferred_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies .push(Dependency::new(DependencyKind::Before, set).add_config(IgnoreDeferred)); } @@ -150,9 +177,9 @@ impl NodeConfigs { fn after_ignore_deferred_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies .push(Dependency::new(DependencyKind::After, set).add_config(IgnoreDeferred)); } @@ -166,7 +193,7 @@ impl NodeConfigs { fn distributive_run_if_inner(&mut self, condition: impl Condition + Clone) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config.conditions.push(new_condition(condition)); } Self::Configs { configs, .. } => { @@ -179,8 +206,8 @@ impl NodeConfigs { fn ambiguous_with_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { - ambiguous_with(&mut config.graph_info, set); + Self::ScheduleConfig(config) => { + ambiguous_with(&mut config.metadata, set); } Self::Configs { configs, .. } => { for config in configs { @@ -192,8 +219,8 @@ impl NodeConfigs { fn ambiguous_with_all_inner(&mut self) { match self { - Self::NodeConfig(config) => { - config.graph_info.ambiguous_with = Ambiguity::IgnoreAll; + Self::ScheduleConfig(config) => { + config.metadata.ambiguous_with = Ambiguity::IgnoreAll; } Self::Configs { configs, .. } => { for config in configs { @@ -209,7 +236,7 @@ impl NodeConfigs { /// Prefer `run_if` for run conditions whose type is known at compile time. pub fn run_if_dyn(&mut self, condition: BoxedCondition) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config.conditions.push(condition); } Self::Configs { @@ -223,9 +250,9 @@ impl NodeConfigs { fn chain_inner(mut self) -> Self { match &mut self { - Self::NodeConfig(_) => { /* no op */ } - Self::Configs { chained, .. } => { - chained.set_chained(); + Self::ScheduleConfig(_) => { /* no op */ } + Self::Configs { metadata, .. 
} => { + metadata.set_chained(); } }; self @@ -233,16 +260,16 @@ impl NodeConfigs { fn chain_ignore_deferred_inner(mut self) -> Self { match &mut self { - Self::NodeConfig(_) => { /* no op */ } - Self::Configs { chained, .. } => { - chained.set_chained_with_config(IgnoreDeferred); + Self::ScheduleConfig(_) => { /* no op */ } + Self::Configs { metadata, .. } => { + metadata.set_chained_with_config(IgnoreDeferred); } } self } } -/// Types that can convert into a [`SystemConfigs`]. +/// Types that can convert into a [`ScheduleConfigs`]. /// /// This trait is implemented for "systems" (functions whose arguments all implement /// [`SystemParam`](crate::system::SystemParam)), or tuples thereof. @@ -252,19 +279,19 @@ impl NodeConfigs { /// /// This trait should only be used as a bound for trait implementations or as an /// argument to a function. If system configs need to be returned from a -/// function or stored somewhere, use [`SystemConfigs`] instead of this trait. +/// function or stored somewhere, use [`ScheduleConfigs`] instead of this trait. /// /// # Examples /// /// ``` -/// # use bevy_ecs::schedule::IntoSystemConfigs; +/// # use bevy_ecs::{schedule::IntoScheduleConfigs, system::ScheduleSystem}; /// # struct AppMock; /// # struct Update; /// # impl AppMock { /// # pub fn add_systems( /// # &mut self, /// # schedule: Update, -/// # systems: impl IntoSystemConfigs, +/// # systems: impl IntoScheduleConfigs, /// # ) -> &mut Self { self } /// # } /// # let mut app = AppMock; @@ -286,16 +313,15 @@ impl NodeConfigs { message = "`{Self}` does not describe a valid system configuration", label = "invalid system configuration" )] -pub trait IntoSystemConfigs -where - Self: Sized, +pub trait IntoScheduleConfigs, Marker>: + Sized { - /// Convert into a [`SystemConfigs`]. - fn into_configs(self) -> SystemConfigs; + /// Convert into a [`ScheduleConfigs`]. + fn into_configs(self) -> ScheduleConfigs; /// Add these systems to the provided `set`. #[track_caller] - fn in_set(self, set: impl SystemSet) -> SystemConfigs { + fn in_set(self, set: impl SystemSet) -> ScheduleConfigs { self.into_configs().in_set(set) } @@ -307,7 +333,7 @@ where /// /// Calling [`.chain`](Self::chain) is often more convenient and ensures that all systems are added to the schedule. /// Please check the [caveats section of `.after`](Self::after) for details. - fn before(self, set: impl IntoSystemSet) -> SystemConfigs { + fn before(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().before(set) } @@ -334,7 +360,7 @@ where /// any ordering calls between them—whether using `.before`, `.after`, or `.chain`—will be silently ignored. /// /// [`configure_sets`]: https://docs.rs/bevy/latest/bevy/app/struct.App.html#method.configure_sets - fn after(self, set: impl IntoSystemSet) -> SystemConfigs { + fn after(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().after(set) } @@ -342,7 +368,7 @@ where /// /// Unlike [`before`](Self::before), this will not cause the systems in /// `set` to wait for the deferred effects of `self` to be applied. - fn before_ignore_deferred(self, set: impl IntoSystemSet) -> SystemConfigs { + fn before_ignore_deferred(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().before_ignore_deferred(set) } @@ -350,7 +376,7 @@ where /// /// Unlike [`after`](Self::after), this will not wait for the deferred /// effects of systems in `set` to be applied. 
- fn after_ignore_deferred(self, set: impl IntoSystemSet) -> SystemConfigs { + fn after_ignore_deferred(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().after_ignore_deferred(set) } @@ -362,7 +388,7 @@ where /// Each individual condition will be evaluated at most once (per schedule run), /// right before the corresponding system prepares to run. /// - /// This is equivalent to calling [`run_if`](IntoSystemConfigs::run_if) on each individual + /// This is equivalent to calling [`run_if`](IntoScheduleConfigs::run_if) on each individual /// system, as shown below: /// /// ``` @@ -381,10 +407,10 @@ where /// that all evaluations in a single schedule run will yield the same result. If another /// system is run inbetween two evaluations it could cause the result of the condition to change. /// - /// Use [`run_if`](IntoSystemSetConfigs::run_if) on a [`SystemSet`] if you want to make sure + /// Use [`run_if`](ScheduleConfigs::run_if) on a [`SystemSet`] if you want to make sure /// that either all or none of the systems are run, or you don't want to evaluate the run /// condition for each contained system separately. - fn distributive_run_if(self, condition: impl Condition + Clone) -> SystemConfigs { + fn distributive_run_if(self, condition: impl Condition + Clone) -> ScheduleConfigs { self.into_configs().distributive_run_if(condition) } @@ -416,21 +442,21 @@ where /// is upheld after the first system has run. You need to make sure that no other systems that /// could invalidate the condition are scheduled inbetween the first and last run system. /// - /// Use [`distributive_run_if`](IntoSystemConfigs::distributive_run_if) if you want the + /// Use [`distributive_run_if`](IntoScheduleConfigs::distributive_run_if) if you want the /// condition to be evaluated for each individual system, right before one is run. - fn run_if(self, condition: impl Condition) -> SystemConfigs { + fn run_if(self, condition: impl Condition) -> ScheduleConfigs { self.into_configs().run_if(condition) } /// Suppress warnings and errors that would result from these systems having ambiguities /// (conflicting access but indeterminate order) with systems in `set`. - fn ambiguous_with(self, set: impl IntoSystemSet) -> SystemConfigs { + fn ambiguous_with(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().ambiguous_with(set) } /// Suppress warnings and errors that would result from these systems having ambiguities /// (conflicting access but indeterminate order) with any other system. - fn ambiguous_with_all(self) -> SystemConfigs { + fn ambiguous_with_all(self) -> ScheduleConfigs { self.into_configs().ambiguous_with_all() } @@ -441,7 +467,7 @@ where /// If the preceding node on an edge has deferred parameters, an [`ApplyDeferred`](crate::schedule::ApplyDeferred) /// will be inserted on the edge. If this behavior is not desired consider using /// [`chain_ignore_deferred`](Self::chain_ignore_deferred) instead. - fn chain(self) -> SystemConfigs { + fn chain(self) -> ScheduleConfigs { self.into_configs().chain() } @@ -450,12 +476,14 @@ where /// Ordering constraints will be applied between the successive elements. /// /// Unlike [`chain`](Self::chain) this will **not** add [`ApplyDeferred`](crate::schedule::ApplyDeferred) on the edges. 
- fn chain_ignore_deferred(self) -> SystemConfigs { + fn chain_ignore_deferred(self) -> ScheduleConfigs { self.into_configs().chain_ignore_deferred() } } -impl IntoSystemConfigs<()> for SystemConfigs { +impl> IntoScheduleConfigs + for ScheduleConfigs +{ fn into_configs(self) -> Self { self } @@ -496,12 +524,15 @@ impl IntoSystemConfigs<()> for SystemConfigs { self } - fn distributive_run_if(mut self, condition: impl Condition + Clone) -> SystemConfigs { + fn distributive_run_if( + mut self, + condition: impl Condition + Clone, + ) -> ScheduleConfigs { self.distributive_run_if_inner(condition); self } - fn run_if(mut self, condition: impl Condition) -> SystemConfigs { + fn run_if(mut self, condition: impl Condition) -> ScheduleConfigs { self.run_if_dyn(new_condition(condition)); self } @@ -526,49 +557,65 @@ impl IntoSystemConfigs<()> for SystemConfigs { } } -/// Marker component to allow for conflicting implementations of [`IntoSystemConfigs`] +/// Marker component to allow for conflicting implementations of [`IntoScheduleConfigs`] #[doc(hidden)] pub struct Infallible; -impl IntoSystemConfigs<(Infallible, Marker)> for F +impl IntoScheduleConfigs for F where F: IntoSystem<(), (), Marker>, { - fn into_configs(self) -> SystemConfigs { + fn into_configs(self) -> ScheduleConfigs { let wrapper = InfallibleSystemWrapper::new(IntoSystem::into_system(self)); - SystemConfigs::new_system(Box::new(wrapper)) + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(Box::new(wrapper))) } } -/// Marker component to allow for conflicting implementations of [`IntoSystemConfigs`] +impl IntoScheduleConfigs for F +where + F: IntoSystem<(), Never, Marker>, +{ + fn into_configs(self) -> ScheduleConfigs { + let wrapper = InfallibleSystemWrapper::new(IntoSystem::into_system(self)); + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(Box::new(wrapper))) + } +} + +/// Marker component to allow for conflicting implementations of [`IntoScheduleConfigs`] #[doc(hidden)] pub struct Fallible; -impl IntoSystemConfigs<(Fallible, Marker)> for F +impl IntoScheduleConfigs for F where F: IntoSystem<(), Result, Marker>, { - fn into_configs(self) -> SystemConfigs { + fn into_configs(self) -> ScheduleConfigs { let boxed_system = Box::new(IntoSystem::into_system(self)); - SystemConfigs::new_system(boxed_system) + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(boxed_system)) } } -impl IntoSystemConfigs<()> for BoxedSystem<(), Result> { - fn into_configs(self) -> SystemConfigs { - SystemConfigs::new_system(self) +impl IntoScheduleConfigs for BoxedSystem<(), Result> { + fn into_configs(self) -> ScheduleConfigs { + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(self)) + } +} + +impl IntoScheduleConfigs for S { + fn into_configs(self) -> ScheduleConfigs { + ScheduleConfigs::ScheduleConfig(InternedSystemSet::into_config(self.intern())) } } #[doc(hidden)] -pub struct SystemConfigTupleMarker; +pub struct ScheduleConfigTupleMarker; -macro_rules! impl_system_collection { +macro_rules! impl_node_type_collection { ($(#[$meta:meta])* $(($param: ident, $sys: ident)),*) => { $(#[$meta])* - impl<$($param, $sys),*> IntoSystemConfigs<(SystemConfigTupleMarker, $($param,)*)> for ($($sys,)*) + impl<$($param, $sys),*, T: Schedulable> IntoScheduleConfigs for ($($sys,)*) where - $($sys: IntoSystemConfigs<$param>),* + $($sys: IntoScheduleConfigs),* { #[expect( clippy::allow_attributes, @@ -578,12 +625,12 @@ macro_rules! 
impl_system_collection { non_snake_case, reason = "Variable names are provided by the macro caller, not by us." )] - fn into_configs(self) -> SystemConfigs { + fn into_configs(self) -> ScheduleConfigs { let ($($sys,)*) = self; - SystemConfigs::Configs { + ScheduleConfigs::Configs { + metadata: Default::default(), configs: vec![$($sys.into_configs(),)*], collective_conditions: Vec::new(), - chained: Default::default(), } } } @@ -592,246 +639,9 @@ macro_rules! impl_system_collection { all_tuples!( #[doc(fake_variadic)] - impl_system_collection, + impl_node_type_collection, 1, 20, P, S ); - -/// A [`SystemSet`] with scheduling metadata. -pub type SystemSetConfig = NodeConfig; - -impl SystemSetConfig { - #[track_caller] - pub(super) fn new(set: InternedSystemSet) -> Self { - // system type sets are automatically populated - // to avoid unintentionally broad changes, they cannot be configured - assert!( - set.system_type().is_none(), - "configuring system type sets is not allowed" - ); - - Self { - node: set, - graph_info: GraphInfo::default(), - conditions: Vec::new(), - } - } -} - -/// A collection of [`SystemSetConfig`]. -pub type SystemSetConfigs = NodeConfigs; - -/// Types that can convert into a [`SystemSetConfigs`]. -/// -/// # Usage notes -/// -/// This trait should only be used as a bound for trait implementations or as an -/// argument to a function. If system set configs need to be returned from a -/// function or stored somewhere, use [`SystemSetConfigs`] instead of this trait. -#[diagnostic::on_unimplemented( - message = "`{Self}` does not describe a valid system set configuration", - label = "invalid system set configuration" -)] -pub trait IntoSystemSetConfigs -where - Self: Sized, -{ - /// Convert into a [`SystemSetConfigs`]. - #[doc(hidden)] - fn into_configs(self) -> SystemSetConfigs; - - /// Add these system sets to the provided `set`. - #[track_caller] - fn in_set(self, set: impl SystemSet) -> SystemSetConfigs { - self.into_configs().in_set(set) - } - - /// Runs before all systems in `set`. If `self` has any systems that produce [`Commands`](crate::system::Commands) - /// or other [`Deferred`](crate::system::Deferred) operations, all systems in `set` will see their effect. - /// - /// If automatically inserting [`ApplyDeferred`](crate::schedule::ApplyDeferred) like - /// this isn't desired, use [`before_ignore_deferred`](Self::before_ignore_deferred) instead. - fn before(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().before(set) - } - - /// Runs after all systems in `set`. If `set` has any systems that produce [`Commands`](crate::system::Commands) - /// or other [`Deferred`](crate::system::Deferred) operations, all systems in `self` will see their effect. - /// - /// If automatically inserting [`ApplyDeferred`](crate::schedule::ApplyDeferred) like - /// this isn't desired, use [`after_ignore_deferred`](Self::after_ignore_deferred) instead. - fn after(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().after(set) - } - - /// Run before all systems in `set`. - /// - /// Unlike [`before`](Self::before), this will not cause the systems in `set` to wait for the - /// deferred effects of `self` to be applied. - fn before_ignore_deferred(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().before_ignore_deferred(set) - } - - /// Run after all systems in `set`. - /// - /// Unlike [`after`](Self::after), this may not see the deferred - /// effects of systems in `set` to be applied. 
- fn after_ignore_deferred(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().after_ignore_deferred(set) - } - - /// Run the systems in this set(s) only if the [`Condition`] is `true`. - /// - /// The `Condition` will be evaluated at most once (per schedule run), - /// the first time a system in this set(s) prepares to run. - fn run_if(self, condition: impl Condition) -> SystemSetConfigs { - self.into_configs().run_if(condition) - } - - /// Suppress warnings and errors that would result from systems in these sets having ambiguities - /// (conflicting access but indeterminate order) with systems in `set`. - fn ambiguous_with(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().ambiguous_with(set) - } - - /// Suppress warnings and errors that would result from systems in these sets having ambiguities - /// (conflicting access but indeterminate order) with any other system. - fn ambiguous_with_all(self) -> SystemSetConfigs { - self.into_configs().ambiguous_with_all() - } - - /// Treat this collection as a sequence of system sets. - /// - /// Ordering constraints will be applied between the successive elements. - fn chain(self) -> SystemSetConfigs { - self.into_configs().chain() - } - - /// Treat this collection as a sequence of systems. - /// - /// Ordering constraints will be applied between the successive elements. - /// - /// Unlike [`chain`](Self::chain) this will **not** add [`ApplyDeferred`](crate::schedule::ApplyDeferred) on the edges. - fn chain_ignore_deferred(self) -> SystemSetConfigs { - self.into_configs().chain_ignore_deferred() - } -} - -impl IntoSystemSetConfigs for SystemSetConfigs { - fn into_configs(self) -> Self { - self - } - - #[track_caller] - fn in_set(mut self, set: impl SystemSet) -> Self { - assert!( - set.system_type().is_none(), - "adding arbitrary systems to a system type set is not allowed" - ); - self.in_set_inner(set.intern()); - - self - } - - fn before(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.before_inner(set.intern()); - - self - } - - fn after(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.after_inner(set.intern()); - - self - } - - fn before_ignore_deferred(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.before_ignore_deferred_inner(set.intern()); - - self - } - - fn after_ignore_deferred(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.after_ignore_deferred_inner(set.intern()); - - self - } - - fn run_if(mut self, condition: impl Condition) -> SystemSetConfigs { - self.run_if_dyn(new_condition(condition)); - - self - } - - fn ambiguous_with(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.ambiguous_with_inner(set.intern()); - - self - } - - fn ambiguous_with_all(mut self) -> Self { - self.ambiguous_with_all_inner(); - - self - } - - fn chain(self) -> Self { - self.chain_inner() - } - - fn chain_ignore_deferred(self) -> Self { - self.chain_ignore_deferred_inner() - } -} - -impl IntoSystemSetConfigs for S { - fn into_configs(self) -> SystemSetConfigs { - SystemSetConfigs::NodeConfig(SystemSetConfig::new(self.intern())) - } -} - -impl IntoSystemSetConfigs for SystemSetConfig { - fn into_configs(self) -> SystemSetConfigs { - SystemSetConfigs::NodeConfig(self) - } -} - -macro_rules! 
impl_system_set_collection { - ($(#[$meta:meta])* $($set: ident),*) => { - $(#[$meta])* - impl<$($set: IntoSystemSetConfigs),*> IntoSystemSetConfigs for ($($set,)*) - { - #[expect( - clippy::allow_attributes, - reason = "We are inside a macro, and as such, `non_snake_case` is not guaranteed to apply." - )] - #[allow( - non_snake_case, - reason = "Variable names are provided by the macro caller, not by us." - )] - fn into_configs(self) -> SystemSetConfigs { - let ($($set,)*) = self; - SystemSetConfigs::Configs { - configs: vec![$($set.into_configs(),)*], - collective_conditions: Vec::new(), - chained: Default::default(), - } - } - } - } -} - -all_tuples!( - #[doc(fake_variadic)] - impl_system_set_collection, - 1, - 20, - S -); diff --git a/crates/bevy_ecs/src/schedule/executor/mod.rs b/crates/bevy_ecs/src/schedule/executor/mod.rs index fc9b50e700..0a78b5805d 100644 --- a/crates/bevy_ecs/src/schedule/executor/mod.rs +++ b/crates/bevy_ecs/src/schedule/executor/mod.rs @@ -16,11 +16,11 @@ use fixedbitset::FixedBitSet; use crate::{ archetype::ArchetypeComponentId, component::{ComponentId, Tick}, + error::{BevyError, ErrorContext, Result}, prelude::{IntoSystemSet, SystemSet}, query::Access, - result::Result, schedule::{BoxedCondition, InternedSystemSet, NodeId, SystemTypeSet}, - system::{ScheduleSystem, System, SystemIn}, + system::{ScheduleSystem, System, SystemIn, SystemParamValidationError}, world::{unsafe_world_cell::UnsafeWorldCell, DeferredWorld, World}, }; @@ -33,6 +33,7 @@ pub(super) trait SystemExecutor: Send + Sync { schedule: &mut SystemSchedule, world: &mut World, skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ); fn set_apply_final_deferred(&mut self, value: bool); } @@ -220,10 +221,13 @@ impl System for ApplyDeferred { fn queue_deferred(&mut self, _world: DeferredWorld) {} - unsafe fn validate_param_unsafe(&mut self, _world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + _world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // This system is always valid to run because it doesn't do anything, // and only used as a marker for the executor. 
- true + Ok(()) } fn initialize(&mut self, _world: &mut World) {} @@ -264,7 +268,7 @@ mod __rust_begin_short_backtrace { use core::hint::black_box; use crate::{ - result::Result, + error::Result, system::{ReadOnlySystem, ScheduleSystem}, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; @@ -311,17 +315,14 @@ mod __rust_begin_short_backtrace { #[cfg(test)] mod tests { use crate::{ - prelude::{IntoSystemConfigs, IntoSystemSetConfigs, Resource, Schedule, SystemSet}, + prelude::{Component, In, IntoSystem, Resource, Schedule}, schedule::ExecutorKind, - system::{Commands, Res, WithParamWarnPolicy}, + system::{Populated, Res, ResMut, Single}, world::World, }; - #[derive(Resource)] - struct R1; - - #[derive(Resource)] - struct R2; + #[derive(Component)] + struct TestComponent; const EXECUTORS: [ExecutorKind; 3] = [ ExecutorKind::Simple, @@ -329,63 +330,243 @@ mod tests { ExecutorKind::MultiThreaded, ]; + #[derive(Resource, Default)] + struct TestState { + populated_ran: bool, + single_ran: bool, + } + + #[derive(Resource, Default)] + struct Counter(u8); + + fn set_single_state(mut _single: Single<&TestComponent>, mut state: ResMut) { + state.single_ran = true; + } + + fn set_populated_state( + mut _populated: Populated<&TestComponent>, + mut state: ResMut, + ) { + state.populated_ran = true; + } + #[test] - fn invalid_system_param_skips() { + #[expect(clippy::print_stdout, reason = "std and println are allowed in tests")] + fn single_and_populated_skipped_and_run() { for executor in EXECUTORS { - invalid_system_param_skips_core(executor); + std::println!("Testing executor: {:?}", executor); + + let mut world = World::new(); + world.init_resource::(); + + let mut schedule = Schedule::default(); + schedule.set_executor_kind(executor); + schedule.add_systems((set_single_state, set_populated_state)); + schedule.run(&mut world); + + let state = world.get_resource::().unwrap(); + assert!(!state.single_ran); + assert!(!state.populated_ran); + + world.spawn(TestComponent); + + schedule.run(&mut world); + let state = world.get_resource::().unwrap(); + assert!(state.single_ran); + assert!(state.populated_ran); } } - fn invalid_system_param_skips_core(executor: ExecutorKind) { - let mut world = World::new(); - let mut schedule = Schedule::default(); - schedule.set_executor_kind(executor); - schedule.add_systems( - ( - // This system depends on a system that is always skipped. - (|mut commands: Commands| { - commands.insert_resource(R2); - }) - .warn_param_missing(), - ) - .chain(), - ); - schedule.run(&mut world); - assert!(world.get_resource::().is_none()); - assert!(world.get_resource::().is_some()); - } - - #[derive(SystemSet, Hash, Debug, PartialEq, Eq, Clone)] - struct S1; + fn look_for_missing_resource(_res: Res) {} #[test] - fn invalid_condition_param_skips_system() { - for executor in EXECUTORS { - invalid_condition_param_skips_system_core(executor); - } - } - - fn invalid_condition_param_skips_system_core(executor: ExecutorKind) { + #[should_panic] + fn missing_resource_panics_simple() { let mut world = World::new(); let mut schedule = Schedule::default(); - schedule.set_executor_kind(executor); - schedule.configure_sets(S1.run_if((|_: Res| true).warn_param_missing())); - schedule.add_systems(( - // System gets skipped if system set run conditions fail validation. - (|mut commands: Commands| { - commands.insert_resource(R1); - }) - .warn_param_missing() - .in_set(S1), - // System gets skipped if run conditions fail validation. 
- (|mut commands: Commands| { - commands.insert_resource(R2); - }) - .warn_param_missing() - .run_if((|_: Res| true).warn_param_missing()), - )); + + schedule.set_executor_kind(ExecutorKind::Simple); + schedule.add_systems(look_for_missing_resource); schedule.run(&mut world); - assert!(world.get_resource::().is_none()); - assert!(world.get_resource::().is_none()); + } + + #[test] + #[should_panic] + fn missing_resource_panics_single_threaded() { + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.set_executor_kind(ExecutorKind::SingleThreaded); + schedule.add_systems(look_for_missing_resource); + schedule.run(&mut world); + } + + #[test] + #[should_panic] + fn missing_resource_panics_multi_threaded() { + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.set_executor_kind(ExecutorKind::MultiThreaded); + schedule.add_systems(look_for_missing_resource); + schedule.run(&mut world); + } + + #[test] + fn piped_systems_first_system_skipped() { + // This system should be skipped when run due to no matching entity + fn pipe_out(_single: Single<&TestComponent>) -> u8 { + 42 + } + + fn pipe_in(_input: In, mut counter: ResMut) { + counter.0 += 1; + } + + let mut world = World::new(); + world.init_resource::(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + + let counter = world.resource::(); + assert_eq!(counter.0, 0); + } + + #[test] + fn piped_system_second_system_skipped() { + fn pipe_out(mut counter: ResMut) -> u8 { + counter.0 += 1; + 42 + } + + // This system should be skipped when run due to no matching entity + fn pipe_in(_input: In, _single: Single<&TestComponent>) {} + + let mut world = World::new(); + world.init_resource::(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + let counter = world.resource::(); + assert_eq!(counter.0, 0); + } + + #[test] + #[should_panic] + fn piped_system_first_system_panics() { + // This system should panic when run because the resource is missing + fn pipe_out(_res: Res) -> u8 { + 42 + } + + fn pipe_in(_input: In) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + #[test] + #[should_panic] + fn piped_system_second_system_panics() { + fn pipe_out() -> u8 { + 42 + } + + // This system should panic when run because the resource is missing + fn pipe_in(_input: In, _res: Res) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + // This test runs without panicking because we've + // decided to use early-out behavior for piped systems + #[test] + fn piped_system_skip_and_panic() { + // This system should be skipped when run due to no matching entity + fn pipe_out(_single: Single<&TestComponent>) -> u8 { + 42 + } + + // This system should panic when run because the resource is missing + fn pipe_in(_input: In, _res: Res) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + #[test] + #[should_panic] + fn piped_system_panic_and_skip() { + // This system should panic when run because the resource is missing + + fn pipe_out(_res: Res) -> u8 { + 42 + } + + // This system should be skipped when run due to no matching entity + 
fn pipe_in(_input: In, _single: Single<&TestComponent>) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + #[test] + #[should_panic] + fn piped_system_panic_and_panic() { + // This system should panic when run because the resource is missing + + fn pipe_out(_res: Res) -> u8 { + 42 + } + + // This system should panic when run because the resource is missing + fn pipe_in(_input: In, _res: Res) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + #[test] + fn piped_system_skip_and_skip() { + // This system should be skipped when run due to no matching entity + + fn pipe_out(_single: Single<&TestComponent>, mut counter: ResMut) -> u8 { + counter.0 += 1; + 42 + } + + // This system should be skipped when run due to no matching entity + fn pipe_in(_input: In, _single: Single<&TestComponent>, mut counter: ResMut) { + counter.0 += 1; + } + + let mut world = World::new(); + world.init_resource::(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + + let counter = world.resource::(); + assert_eq!(counter.0, 0); } } diff --git a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs index 2580207db2..dd029c91c8 100644 --- a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs +++ b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs @@ -1,20 +1,20 @@ use alloc::{boxed::Box, vec::Vec}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use bevy_tasks::{ComputeTaskPool, Scope, TaskPool, ThreadExecutor}; use bevy_utils::{default, syncunsafecell::SyncUnsafeCell}; use concurrent_queue::ConcurrentQueue; use core::{any::Any, panic::AssertUnwindSafe}; use fixedbitset::FixedBitSet; -use std::{ - eprintln, - sync::{Mutex, MutexGuard}, -}; +#[cfg(feature = "std")] +use std::eprintln; +use std::sync::{Mutex, MutexGuard}; #[cfg(feature = "trace")] use tracing::{info_span, Span}; use crate::{ archetype::ArchetypeComponentId, + error::{default_error_handler, BevyError, ErrorContext, Result}, prelude::Resource, query::Access, schedule::{is_apply_deferred, BoxedCondition, ExecutorKind, SystemExecutor, SystemSchedule}, @@ -131,6 +131,7 @@ pub struct ExecutorState { struct Context<'scope, 'env, 'sys> { environment: &'env Environment<'env, 'sys>, scope: &'scope Scope<'scope, 'env, ()>, + error_handler: fn(BevyError, ErrorContext), } impl Default for MultiThreadedExecutor { @@ -181,6 +182,7 @@ impl SystemExecutor for MultiThreadedExecutor { schedule: &mut SystemSchedule, world: &mut World, _skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ) { let state = self.state.get_mut().unwrap(); // reset counts @@ -220,7 +222,11 @@ impl SystemExecutor for MultiThreadedExecutor { false, thread_executor, |scope| { - let context = Context { environment, scope }; + let context = Context { + environment, + scope, + error_handler, + }; // The first tick won't need to process finished systems, but we still need to run the loop in // tick_executor() in case a system completes while the first tick still holds the mutex. 
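---
Note on the executor changes in the surrounding hunks: `SystemExecutor::run` now receives an `error_handler: fn(BevyError, ErrorContext)`, and the multi-threaded executor threads it through its `Context` instead of panicking inside the task. Below is a minimal sketch of a function with that shape. It assumes `BevyError` and `ErrorContext` are re-exported from `bevy_ecs::error` (matching the in-crate `crate::error` imports in this diff) and that `ErrorContext::System { name, last_run }` is constructed exactly as shown in the hunks that follow; the handler itself is hypothetical, not part of this PR.

```rust
use bevy_ecs::error::{BevyError, ErrorContext};

// Hypothetical handler matching the `fn(BevyError, ErrorContext)` signature the
// executors now accept. In this PR the schedule still sources the handler from
// `default_error_handler()` rather than from user code.
fn log_and_continue(error: BevyError, ctx: ErrorContext) {
    // `ErrorContext::System { name, last_run }` is the variant constructed by the
    // executors in this diff; any other variants are ignored here.
    if let ErrorContext::System { name, .. } = ctx {
        eprintln!("system `{name}` returned an error: {error:?}");
    }
}
```
---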
@@ -276,7 +282,11 @@ impl<'scope, 'env: 'scope, 'sys> Context<'scope, 'env, 'sys> { .push(SystemResult { system_index }) .unwrap_or_else(|error| unreachable!("{}", error)); if let Err(payload) = res { - eprintln!("Encountered a panic in system `{}`!", &*system.name()); + #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] + { + eprintln!("Encountered a panic in system `{}`!", &*system.name()); + } // set the payload to propagate the error { let mut panic_payload = self.environment.executor.panic_payload.lock().unwrap(); @@ -442,6 +452,8 @@ impl ExecutorState { // SAFETY: // - Caller ensured no other reference to this system exists. + // - `system_task_metadata[system_index].is_exclusive` is `false`, + // so `System::is_exclusive` returned `false` when we called it. // - `can_run` has been called, which calls `update_archetype_component_access` with this system. // - `can_run` returned true, so no systems with conflicting world access are running. unsafe { @@ -526,6 +538,7 @@ impl ExecutorState { world: UnsafeWorldCell, ) -> bool { let mut should_run = !self.skipped_systems.contains(system_index); + let error_handler = default_error_handler(); for set_idx in conditions.sets_with_conditions_of_systems[system_index].ones() { if self.evaluated_sets.contains(set_idx) { @@ -570,10 +583,25 @@ impl ExecutorState { // - The caller ensures that `world` has permission to read any data // required by the system. // - `update_archetype_component_access` has been called for system. - let valid_params = unsafe { system.validate_param_unsafe(world) }; + let valid_params = match unsafe { system.validate_param_unsafe(world) } { + Ok(()) => true, + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } + false + } + }; if !valid_params { self.skipped_systems.insert(system_index); } + should_run &= valid_params; } @@ -582,6 +610,7 @@ impl ExecutorState { /// # Safety /// - Caller must not alias systems that are running. + /// - `is_exclusive` must have returned `false` for the specified system. /// - `world` must have permission to access the world data /// used by the specified system. /// - `update_archetype_component_access` must have been called with `world` @@ -599,19 +628,21 @@ impl ExecutorState { // SAFETY: // - The caller ensures that we have permission to // access the world data used by the system. + // - `is_exclusive` returned false // - `update_archetype_component_access` has been called. unsafe { - // TODO: implement an error-handling API instead of panicking. if let Err(err) = __rust_begin_short_backtrace::run_unsafe( system, context.environment.world_cell, ) { - panic!( - "Encountered an error in system `{}`: {:?}", - &*system.name(), - err + (context.error_handler)( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, ); - }; + } }; })); context.system_completed(system_index, res, system); @@ -655,14 +686,15 @@ impl ExecutorState { // that no other systems currently have access to the world. let world = unsafe { context.environment.world_cell.world_mut() }; let res = std::panic::catch_unwind(AssertUnwindSafe(|| { - // TODO: implement an error-handling API instead of panicking. 
if let Err(err) = __rust_begin_short_backtrace::run(system, world) { - panic!( - "Encountered an error in system `{}`: {:?}", - &*system.name(), - err + (context.error_handler)( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, ); - }; + } })); context.system_completed(system_index, res, system); }; @@ -732,10 +764,14 @@ fn apply_deferred( system.apply_deferred(world); })); if let Err(payload) = res { - eprintln!( - "Encountered a panic when applying buffers for system `{}`!", - &*system.name() - ); + #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] + { + eprintln!( + "Encountered a panic when applying buffers for system `{}`!", + &*system.name() + ); + } return Err(payload); } } @@ -751,6 +787,8 @@ unsafe fn evaluate_and_fold_conditions( conditions: &mut [BoxedCondition], world: UnsafeWorldCell, ) -> bool { + let error_handler = default_error_handler(); + #[expect( clippy::unnecessary_fold, reason = "Short-circuiting here would prevent conditions from mutating their own state as needed." @@ -762,8 +800,20 @@ unsafe fn evaluate_and_fold_conditions( // - The caller ensures that `world` has permission to read any data // required by the condition. // - `update_archetype_component_access` has been called for condition. - if !unsafe { condition.validate_param_unsafe(world) } { - return false; + match unsafe { condition.validate_param_unsafe(world) } { + Ok(()) => (), + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: condition.name(), + last_run: condition.get_last_run(), + }, + ); + } + return false; + } } // SAFETY: // - The caller ensures that `world` has permission to read any data @@ -795,7 +845,7 @@ impl MainThreadExecutor { mod tests { use crate::{ prelude::Resource, - schedule::{ExecutorKind, IntoSystemConfigs, Schedule}, + schedule::{ExecutorKind, IntoScheduleConfigs, Schedule}, system::Commands, world::World, }; diff --git a/crates/bevy_ecs/src/schedule/executor/simple.rs b/crates/bevy_ecs/src/schedule/executor/simple.rs index 81f7deab3a..a237a356de 100644 --- a/crates/bevy_ecs/src/schedule/executor/simple.rs +++ b/crates/bevy_ecs/src/schedule/executor/simple.rs @@ -8,6 +8,7 @@ use tracing::info_span; use std::eprintln; use crate::{ + error::{default_error_handler, BevyError, ErrorContext}, schedule::{ executor::is_apply_deferred, BoxedCondition, ExecutorKind, SystemExecutor, SystemSchedule, }, @@ -43,6 +44,7 @@ impl SystemExecutor for SimpleExecutor { schedule: &mut SystemSchedule, world: &mut World, _skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ) { // If stepping is enabled, make sure we skip those systems that should // not be run. @@ -85,7 +87,21 @@ impl SystemExecutor for SimpleExecutor { let system = &mut schedule.systems[system_index]; if should_run { - let valid_params = system.validate_param(world); + let valid_params = match system.validate_param(world) { + Ok(()) => true, + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } + false + } + }; should_run &= valid_params; } @@ -104,17 +120,19 @@ impl SystemExecutor for SimpleExecutor { } let f = AssertUnwindSafe(|| { - // TODO: implement an error-handling API instead of panicking. 
if let Err(err) = __rust_begin_short_backtrace::run(system, world) { - panic!( - "Encountered an error in system `{}`: {:?}", - &*system.name(), - err + error_handler( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, ); } }); #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] { if let Err(payload) = std::panic::catch_unwind(f) { eprintln!("Encountered a panic in system `{}`!", &*system.name()); @@ -149,6 +167,8 @@ impl SimpleExecutor { } fn evaluate_and_fold_conditions(conditions: &mut [BoxedCondition], world: &mut World) -> bool { + let error_handler = default_error_handler(); + #[expect( clippy::unnecessary_fold, reason = "Short-circuiting here would prevent conditions from mutating their own state as needed." @@ -156,8 +176,20 @@ fn evaluate_and_fold_conditions(conditions: &mut [BoxedCondition], world: &mut W conditions .iter_mut() .map(|condition| { - if !condition.validate_param(world) { - return false; + match condition.validate_param(world) { + Ok(()) => (), + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: condition.name(), + last_run: condition.get_last_run(), + }, + ); + } + return false; + } } __rust_begin_short_backtrace::readonly_run(&mut **condition, world) }) diff --git a/crates/bevy_ecs/src/schedule/executor/single_threaded.rs b/crates/bevy_ecs/src/schedule/executor/single_threaded.rs index 8c5a7e0261..b42f47726d 100644 --- a/crates/bevy_ecs/src/schedule/executor/single_threaded.rs +++ b/crates/bevy_ecs/src/schedule/executor/single_threaded.rs @@ -8,6 +8,7 @@ use tracing::info_span; use std::eprintln; use crate::{ + error::{default_error_handler, BevyError, ErrorContext}, schedule::{is_apply_deferred, BoxedCondition, ExecutorKind, SystemExecutor, SystemSchedule}, world::World, }; @@ -49,6 +50,7 @@ impl SystemExecutor for SingleThreadedExecutor { schedule: &mut SystemSchedule, world: &mut World, _skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ) { // If stepping is enabled, make sure we skip those systems that should // not be run. @@ -91,7 +93,22 @@ impl SystemExecutor for SingleThreadedExecutor { let system = &mut schedule.systems[system_index]; if should_run { - let valid_params = system.validate_param(world); + let valid_params = match system.validate_param(world) { + Ok(()) => true, + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } + false + } + }; + should_run &= valid_params; } @@ -112,12 +129,13 @@ impl SystemExecutor for SingleThreadedExecutor { let f = AssertUnwindSafe(|| { if system.is_exclusive() { - // TODO: implement an error-handling API instead of panicking. if let Err(err) = __rust_begin_short_backtrace::run(system, world) { - panic!( - "Encountered an error in system `{}`: {:?}", - &*system.name(), - err + error_handler( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, ); } } else { @@ -127,12 +145,13 @@ impl SystemExecutor for SingleThreadedExecutor { // SAFETY: We have exclusive, single-threaded access to the world and // update_archetype_component_access is being called immediately before this. unsafe { - // TODO: implement an error-handling API instead of panicking. 
if let Err(err) = __rust_begin_short_backtrace::run_unsafe(system, world) { - panic!( - "Encountered an error in system `{}`: {:?}", - &*system.name(), - err + error_handler( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, ); } }; @@ -140,6 +159,7 @@ impl SystemExecutor for SingleThreadedExecutor { }); #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] { if let Err(payload) = std::panic::catch_unwind(f) { eprintln!("Encountered a panic in system `{}`!", &*system.name()); @@ -191,6 +211,8 @@ impl SingleThreadedExecutor { } fn evaluate_and_fold_conditions(conditions: &mut [BoxedCondition], world: &mut World) -> bool { + let error_handler: fn(BevyError, ErrorContext) = default_error_handler(); + #[expect( clippy::unnecessary_fold, reason = "Short-circuiting here would prevent conditions from mutating their own state as needed." @@ -198,8 +220,20 @@ fn evaluate_and_fold_conditions(conditions: &mut [BoxedCondition], world: &mut W conditions .iter_mut() .map(|condition| { - if !condition.validate_param(world) { - return false; + match condition.validate_param(world) { + Ok(()) => (), + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: condition.name(), + last_run: condition.get_last_run(), + }, + ); + } + return false; + } } __rust_begin_short_backtrace::readonly_run(&mut **condition, world) }) diff --git a/crates/bevy_ecs/src/schedule/graph/graph_map.rs b/crates/bevy_ecs/src/schedule/graph/graph_map.rs index b255e55d26..d2fbde9995 100644 --- a/crates/bevy_ecs/src/schedule/graph/graph_map.rs +++ b/crates/bevy_ecs/src/schedule/graph/graph_map.rs @@ -5,7 +5,7 @@ //! [`petgraph`]: https://docs.rs/petgraph/0.6.5/petgraph/ use alloc::vec::Vec; -use bevy_platform_support::{collections::HashSet, hash::FixedHasher}; +use bevy_platform::{collections::HashSet, hash::FixedHasher}; use core::{ fmt, hash::{BuildHasher, Hash}, diff --git a/crates/bevy_ecs/src/schedule/graph/mod.rs b/crates/bevy_ecs/src/schedule/graph/mod.rs index ed25b612ea..8a98604102 100644 --- a/crates/bevy_ecs/src/schedule/graph/mod.rs +++ b/crates/bevy_ecs/src/schedule/graph/mod.rs @@ -5,7 +5,7 @@ use core::{ }; use smallvec::SmallVec; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_utils::TypeIdMap; use fixedbitset::FixedBitSet; @@ -62,7 +62,7 @@ pub(crate) enum Ambiguity { /// Metadata about how the node fits in the schedule graph #[derive(Default)] -pub(crate) struct GraphInfo { +pub struct GraphInfo { /// the sets that the node belongs to (hierarchy) pub(crate) hierarchy: Vec, /// the sets that the node depends on (must run before or after) @@ -276,7 +276,7 @@ pub fn simple_cycles_in_component(graph: &DiGraph, scc: &[NodeId]) -> Vec = schedule .graph() .conflicts_to_string(schedule.graph().conflicting_systems(), world.components()) + .map(|item| { + ( + item.0, + item.1, + item.2 + .into_iter() + .map(|name| name.to_string()) + .collect::>(), + ) + }) .collect(); let expected = &[ ( "system_d".to_string(), "system_a".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ( "system_d".to_string(), "system_e".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ( "system_b".to_string(), "system_a".to_string(), - 
vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ( "system_b".to_string(), "system_e".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ]; @@ -1146,6 +1156,16 @@ mod tests { let ambiguities: Vec<_> = schedule .graph() .conflicts_to_string(schedule.graph().conflicting_systems(), world.components()) + .map(|item| { + ( + item.0, + item.1, + item.2 + .into_iter() + .map(|name| name.to_string()) + .collect::>(), + ) + }) .collect(); assert_eq!( @@ -1153,7 +1173,7 @@ mod tests { ( "resmut_system (in set (resmut_system, resmut_system))".to_string(), "resmut_system (in set (resmut_system, resmut_system))".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ) ); } @@ -1192,7 +1212,7 @@ mod tests { let mut schedule = Schedule::new(TestSchedule); schedule .set_executor_kind($executor) - .add_systems(|| panic!("Executor ignored Stepping")); + .add_systems(|| -> () { panic!("Executor ignored Stepping") }); // Add our schedule to stepping & and enable stepping; this should // prevent any systems in the schedule from running diff --git a/crates/bevy_ecs/src/schedule/schedule.rs b/crates/bevy_ecs/src/schedule/schedule.rs index 890263ca23..584e621bf8 100644 --- a/crates/bevy_ecs/src/schedule/schedule.rs +++ b/crates/bevy_ecs/src/schedule/schedule.rs @@ -2,6 +2,7 @@ clippy::module_inception, reason = "This instance of module inception is being discussed; see #17344." )] +use alloc::borrow::Cow; use alloc::{ boxed::Box, collections::{BTreeMap, BTreeSet}, @@ -10,7 +11,7 @@ use alloc::{ vec, vec::Vec, }; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_utils::{default, TypeIdMap}; use core::{ any::{Any, TypeId}, @@ -26,9 +27,9 @@ use tracing::info_span; use crate::{ component::{ComponentId, Components, Tick}, + error::default_error_handler, prelude::Component, resource::Resource, - result::Result, schedule::*, system::ScheduleSystem, world::World, @@ -49,10 +50,7 @@ pub struct Schedules { impl Schedules { /// Constructs an empty `Schedules` with zero initial capacity. pub fn new() -> Self { - Self { - inner: HashMap::default(), - ignored_scheduling_ambiguities: BTreeSet::new(), - } + Self::default() } /// Inserts a labeled schedule into the map. @@ -149,7 +147,7 @@ impl Schedules { /// Ignore system order ambiguities caused by conflicts on [`Resource`]s of type `T`. pub fn allow_ambiguous_resource(&mut self, world: &mut World) { self.ignored_scheduling_ambiguities - .insert(world.components.register_resource::()); + .insert(world.components_registrator().register_resource::()); } /// Iterate through the [`ComponentId`]'s that will be ignored. @@ -176,7 +174,7 @@ impl Schedules { pub fn add_systems( &mut self, schedule: impl ScheduleLabel, - systems: impl IntoSystemConfigs, + systems: impl IntoScheduleConfigs, ) -> &mut Self { self.entry(schedule).add_systems(systems); @@ -185,10 +183,10 @@ impl Schedules { /// Configures a collection of system sets in the provided schedule, adding any sets that do not exist. 
#[track_caller] - pub fn configure_sets( + pub fn configure_sets( &mut self, schedule: impl ScheduleLabel, - sets: impl IntoSystemSetConfigs, + sets: impl IntoScheduleConfigs, ) -> &mut Self { self.entry(schedule).configure_sets(sets); @@ -335,7 +333,10 @@ impl Schedule { } /// Add a collection of systems to the schedule. - pub fn add_systems(&mut self, systems: impl IntoSystemConfigs) -> &mut Self { + pub fn add_systems( + &mut self, + systems: impl IntoScheduleConfigs, + ) -> &mut Self { self.graph.process_configs(systems.into_configs(), false); self } @@ -373,7 +374,10 @@ impl Schedule { /// Configures a collection of system sets in this schedule, adding them if they does not exist. #[track_caller] - pub fn configure_sets(&mut self, sets: impl IntoSystemSetConfigs) -> &mut Self { + pub fn configure_sets( + &mut self, + sets: impl IntoScheduleConfigs, + ) -> &mut Self { self.graph.configure_sets(sets); self } @@ -437,8 +441,11 @@ impl Schedule { self.initialize(world) .unwrap_or_else(|e| panic!("Error when initializing schedule {:?}: {e}", self.label)); + let error_handler = default_error_handler(); + #[cfg(not(feature = "bevy_debug_stepping"))] - self.executor.run(&mut self.executable, world, None); + self.executor + .run(&mut self.executable, world, None, error_handler); #[cfg(feature = "bevy_debug_stepping")] { @@ -447,8 +454,12 @@ impl Schedule { Some(mut stepping) => stepping.skipped_systems(self), }; - self.executor - .run(&mut self.executable, world, skip_systems.as_ref()); + self.executor.run( + &mut self.executable, + world, + skip_systems.as_ref(), + error_handler, + ); } } @@ -741,6 +752,26 @@ impl ScheduleGraph { .unwrap() } + /// Returns the conditions for the set at the given [`NodeId`], if it exists. + pub fn get_set_conditions_at(&self, id: NodeId) -> Option<&[BoxedCondition]> { + if !id.is_set() { + return None; + } + self.system_set_conditions + .get(id.index()) + .map(Vec::as_slice) + } + + /// Returns the conditions for the set at the given [`NodeId`]. + /// + /// Panics if it doesn't exist. + #[track_caller] + pub fn set_conditions_at(&self, id: NodeId) -> &[BoxedCondition] { + self.get_set_conditions_at(id) + .ok_or_else(|| format!("set with id {id:?} does not exist in this Schedule")) + .unwrap() + } + /// Returns an iterator over all systems in this schedule, along with the conditions for each system. pub fn systems(&self) -> impl Iterator { self.systems @@ -788,9 +819,9 @@ impl ScheduleGraph { &self.conflicting_systems } - fn process_config( + fn process_config( &mut self, - config: NodeConfig, + config: ScheduleConfig, collect_nodes: bool, ) -> ProcessConfigsResult { ProcessConfigsResult { @@ -802,9 +833,11 @@ impl ScheduleGraph { } } - fn apply_collective_conditions( + fn apply_collective_conditions< + T: ProcessScheduleConfig + Schedulable, + >( &mut self, - configs: &mut [NodeConfigs], + configs: &mut [ScheduleConfigs], collective_conditions: Vec, ) { if !collective_conditions.is_empty() { @@ -817,7 +850,7 @@ impl ScheduleGraph { for config in configs.iter_mut() { config.in_set_inner(set.intern()); } - let mut set_config = SystemSetConfig::new(set.intern()); + let mut set_config = InternedSystemSet::into_config(set.intern()); set_config.conditions.extend(collective_conditions); self.configure_set_inner(set_config).unwrap(); } @@ -830,24 +863,26 @@ impl ScheduleGraph { /// `process_config` is the function which processes each individual config node and returns a corresponding `NodeId`. 
/// /// The fields on the returned [`ProcessConfigsResult`] are: - /// - `nodes`: a vector of all node ids contained in the nested `NodeConfigs` + /// - `nodes`: a vector of all node ids contained in the nested `ScheduleConfigs` /// - `densely_chained`: a boolean that is true if all nested nodes are linearly chained (with successive `after` orderings) in the order they are defined #[track_caller] - fn process_configs( + fn process_configs< + T: ProcessScheduleConfig + Schedulable, + >( &mut self, - configs: NodeConfigs, + configs: ScheduleConfigs, collect_nodes: bool, ) -> ProcessConfigsResult { match configs { - NodeConfigs::NodeConfig(config) => self.process_config(config, collect_nodes), - NodeConfigs::Configs { + ScheduleConfigs::ScheduleConfig(config) => self.process_config(config, collect_nodes), + ScheduleConfigs::Configs { + metadata, mut configs, collective_conditions, - chained, } => { self.apply_collective_conditions(&mut configs, collective_conditions); - let is_chained = matches!(chained, Chain::Chained(_)); + let is_chained = matches!(metadata, Chain::Chained(_)); // Densely chained if // * chained and all configs in the chain are densely chained, or @@ -869,7 +904,7 @@ impl ScheduleGraph { let current_result = self.process_configs(current, collect_nodes || is_chained); densely_chained &= current_result.densely_chained; - if let Chain::Chained(chain_options) = &chained { + if let Chain::Chained(chain_options) = &metadata { // if the current result is densely chained, we only need to chain the first node let current_nodes = if current_result.densely_chained { ¤t_result.nodes[..1] @@ -917,12 +952,15 @@ impl ScheduleGraph { } } - /// Add a [`SystemConfig`] to the graph, including its dependencies and conditions. - fn add_system_inner(&mut self, config: SystemConfig) -> Result { + /// Add a [`ScheduleConfig`] to the graph, including its dependencies and conditions. + fn add_system_inner( + &mut self, + config: ScheduleConfig, + ) -> Result { let id = NodeId::System(self.systems.len()); // graph updates are immediate - self.update_graphs(id, config.graph_info)?; + self.update_graphs(id, config.metadata)?; // system init has to be deferred (need `&mut World`) self.uninit.push((id, 0)); @@ -933,15 +971,18 @@ impl ScheduleGraph { } #[track_caller] - fn configure_sets(&mut self, sets: impl IntoSystemSetConfigs) { + fn configure_sets(&mut self, sets: impl IntoScheduleConfigs) { self.process_configs(sets.into_configs(), false); } - /// Add a single `SystemSetConfig` to the graph, including its dependencies and conditions. - fn configure_set_inner(&mut self, set: SystemSetConfig) -> Result { - let SystemSetConfig { + /// Add a single `ScheduleConfig` to the graph, including its dependencies and conditions. 
+ fn configure_set_inner( + &mut self, + set: ScheduleConfig, + ) -> Result { + let ScheduleConfig { node: set, - graph_info, + metadata, mut conditions, } = set; @@ -951,7 +992,7 @@ impl ScheduleGraph { }; // graph updates are immediate - self.update_graphs(id, graph_info)?; + self.update_graphs(id, metadata)?; // system init has to be deferred (need `&mut World`) let system_set_conditions = &mut self.system_set_conditions[id.index()]; @@ -1508,7 +1549,7 @@ impl ScheduleGraph { /// Values returned by [`ScheduleGraph::process_configs`] struct ProcessConfigsResult { - /// All nodes contained inside this `process_configs` call's [`NodeConfigs`] hierarchy, + /// All nodes contained inside this `process_configs` call's [`ScheduleConfigs`] hierarchy, /// if `ancestor_chained` is true nodes: Vec, /// True if and only if all nodes are "densely chained", meaning that all nested nodes @@ -1517,20 +1558,20 @@ struct ProcessConfigsResult { densely_chained: bool, } -/// Trait used by [`ScheduleGraph::process_configs`] to process a single [`NodeConfig`]. -trait ProcessNodeConfig: Sized { - /// Process a single [`NodeConfig`]. - fn process_config(schedule_graph: &mut ScheduleGraph, config: NodeConfig) -> NodeId; +/// Trait used by [`ScheduleGraph::process_configs`] to process a single [`ScheduleConfig`]. +trait ProcessScheduleConfig: Schedulable + Sized { + /// Process a single [`ScheduleConfig`]. + fn process_config(schedule_graph: &mut ScheduleGraph, config: ScheduleConfig) -> NodeId; } -impl ProcessNodeConfig for ScheduleSystem { - fn process_config(schedule_graph: &mut ScheduleGraph, config: NodeConfig) -> NodeId { +impl ProcessScheduleConfig for ScheduleSystem { + fn process_config(schedule_graph: &mut ScheduleGraph, config: ScheduleConfig) -> NodeId { schedule_graph.add_system_inner(config).unwrap() } } -impl ProcessNodeConfig for InternedSystemSet { - fn process_config(schedule_graph: &mut ScheduleGraph, config: NodeConfig) -> NodeId { +impl ProcessScheduleConfig for InternedSystemSet { + fn process_config(schedule_graph: &mut ScheduleGraph, config: ScheduleConfig) -> NodeId { schedule_graph.configure_set_inner(config).unwrap() } } @@ -1862,7 +1903,7 @@ impl ScheduleGraph { &'a self, ambiguities: &'a [(NodeId, NodeId, Vec)], components: &'a Components, - ) -> impl Iterator)> + 'a { + ) -> impl Iterator>)> + 'a { ambiguities .iter() .map(move |(system_a, system_b, conflicts)| { @@ -2019,10 +2060,9 @@ mod tests { use bevy_ecs_macros::ScheduleLabel; use crate::{ - prelude::{Res, Resource}, + prelude::{ApplyDeferred, Res, Resource}, schedule::{ - tests::ResMut, IntoSystemConfigs, IntoSystemSetConfigs, Schedule, - ScheduleBuildSettings, SystemSet, + tests::ResMut, IntoScheduleConfigs, Schedule, ScheduleBuildSettings, SystemSet, }, system::Commands, world::World, @@ -2045,12 +2085,12 @@ mod tests { let mut world = World::new(); let mut schedule = Schedule::default(); + let system: fn() = || { + panic!("This system must not run"); + }; + schedule.configure_sets(Set.run_if(|| false)); - schedule.add_systems( - (|| panic!("This system must not run")) - .ambiguous_with(|| ()) - .in_set(Set), - ); + schedule.add_systems(system.ambiguous_with(|| ()).in_set(Set)); schedule.run(&mut world); } @@ -2071,6 +2111,108 @@ mod tests { assert_eq!(schedule.executable.systems.len(), 3); } + #[test] + fn explicit_sync_point_used_as_auto_sync_point() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + 
|_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred, || {}).chain()); + schedule.run(&mut world); + + // No sync point was inserted, since we can reuse the explicit sync point. + assert_eq!(schedule.executable.systems.len(), 5); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred.run_if(|| false), || {}).chain()); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. + assert_eq!(schedule.executable.systems.len(), 6); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point_condition_on_chain() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred, || {}).chain().run_if(|| false)); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. + assert_eq!(schedule.executable.systems.len(), 6); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point_condition_on_system_set() { + #[derive(SystemSet, Debug, Clone, PartialEq, Eq, Hash)] + struct Set; + + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.configure_sets(Set.run_if(|| false)); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred.in_set(Set), || {}).chain()); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. + assert_eq!(schedule.executable.systems.len(), 6); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point_condition_on_nested_system_set() + { + #[derive(SystemSet, Debug, Clone, PartialEq, Eq, Hash)] + struct Set1; + #[derive(SystemSet, Debug, Clone, PartialEq, Eq, Hash)] + struct Set2; + + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.configure_sets(Set2.run_if(|| false)); + schedule.configure_sets(Set1.in_set(Set2)); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred, || {}).chain().in_set(Set1)); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. 
+ assert_eq!(schedule.executable.systems.len(), 6); + } + #[test] fn merges_sync_points_into_one() { let mut schedule = Schedule::default(); @@ -2123,6 +2265,63 @@ mod tests { assert_eq!(schedule.executable.systems.len(), 5); } + #[test] + fn do_not_consider_ignore_deferred_before_exclusive_system() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + // chain_ignore_deferred adds no sync points usually but an exception is made for exclusive systems + schedule.add_systems( + ( + |_: Commands| {}, + // <- no sync point is added here because the following system is not exclusive + |mut commands: Commands| commands.insert_resource(Resource1), + // <- sync point is added here because the following system is exclusive which expects to see all commands to that point + |world: &mut World| assert!(world.contains_resource::()), + // <- no sync point is added here because the previous system has no deferred parameters + |_: &mut World| {}, + // <- no sync point is added here because the following system is not exclusive + |_: Commands| {}, + ) + .chain_ignore_deferred(), + ); + schedule.run(&mut world); + + assert_eq!(schedule.executable.systems.len(), 6); // 5 systems + 1 sync point + } + + #[test] + fn bubble_sync_point_through_ignore_deferred_node() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + + let insert_resource_config = ( + // the first system has deferred commands + |mut commands: Commands| commands.insert_resource(Resource1), + // the second system has no deferred commands + || {}, + ) + // the first two systems are chained without a sync point in between + .chain_ignore_deferred(); + + schedule.add_systems( + ( + insert_resource_config, + // the third system would panic if the command of the first system was not applied + |_: Res| {}, + ) + // the third system is chained after the first two, possibly with a sync point in between + .chain(), + ); + + // To add a sync point between the second and third system despite the second having no commands, + // the first system has to signal the second system that there are unapplied commands. + // With that the second system will add a sync point after it so the third system will find the resource. 
+ + schedule.run(&mut world); + + assert_eq!(schedule.executable.systems.len(), 4); // 3 systems + 1 sync point + } + #[test] fn disable_auto_sync_points() { let mut schedule = Schedule::default(); diff --git a/crates/bevy_ecs/src/schedule/stepping.rs b/crates/bevy_ecs/src/schedule/stepping.rs index f26da4cad3..b5df8555e2 100644 --- a/crates/bevy_ecs/src/schedule/stepping.rs +++ b/crates/bevy_ecs/src/schedule/stepping.rs @@ -4,7 +4,7 @@ use crate::{ system::{IntoSystem, ResMut}, }; use alloc::vec::Vec; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_utils::TypeIdMap; use core::any::TypeId; use fixedbitset::FixedBitSet; @@ -823,6 +823,7 @@ impl ScheduleState { } #[cfg(all(test, feature = "bevy_debug_stepping"))] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use super::*; use crate::{prelude::*, schedule::ScheduleLabel}; @@ -1348,7 +1349,9 @@ mod tests { // // first system will be configured as `run_if(|| false)`, so it can // just panic if called - let first_system = move || panic!("first_system should not be run"); + let first_system: fn() = move || { + panic!("first_system should not be run"); + }; // The second system, we need to know when it has been called, so we'll // add a resource for tracking if it has been run. The system will diff --git a/crates/bevy_ecs/src/spawn.rs b/crates/bevy_ecs/src/spawn.rs index 2fb04c4c7b..5235889ffb 100644 --- a/crates/bevy_ecs/src/spawn.rs +++ b/crates/bevy_ecs/src/spawn.rs @@ -2,11 +2,12 @@ //! for the best entry points into these APIs and examples of how to use them. use crate::{ - bundle::{Bundle, BundleEffect, DynamicBundle}, + bundle::{Bundle, BundleEffect, DynamicBundle, NoBundleEffect}, entity::Entity, relationship::{RelatedSpawner, Relationship, RelationshipTarget}, world::{EntityWorldMut, World}, }; +use alloc::vec::Vec; use core::marker::PhantomData; use variadics_please::all_tuples; @@ -45,6 +46,17 @@ pub trait SpawnableList { fn size_hint(&self) -> usize; } +impl> SpawnableList for Vec { + fn spawn(self, world: &mut World, entity: Entity) { + let mapped_bundles = self.into_iter().map(|b| (R::from(entity), b)); + world.spawn_batch(mapped_bundles); + } + + fn size_hint(&self) -> usize { + self.len() + } +} + impl SpawnableList for Spawn { fn spawn(self, world: &mut World, entity: Entity) { world.spawn((R::from(entity), self.0)); @@ -113,7 +125,7 @@ impl) + Send + Sync + 'static> for SpawnWith { fn spawn(self, world: &mut World, entity: Entity) { - world.entity_mut(entity).with_related(self.0); + world.entity_mut(entity).with_related_entities(self.0); } fn size_hint(&self) -> usize { @@ -175,7 +187,7 @@ unsafe impl + Send + Sync + 'static> Bundle for SpawnRelatedBundle { fn component_ids( - components: &mut crate::component::Components, + components: &mut crate::component::ComponentsRegistrator, ids: &mut impl FnMut(crate::component::ComponentId), ) { ::component_ids(components, ids); @@ -189,7 +201,7 @@ unsafe impl + Send + Sync + 'static> Bundle } fn register_required_components( - components: &mut crate::component::Components, + components: &mut crate::component::ComponentsRegistrator, required_components: &mut crate::component::RequiredComponents, ) { ::register_required_components( @@ -223,9 +235,7 @@ pub struct SpawnOneRelated { impl BundleEffect for SpawnOneRelated { fn apply(self, entity: &mut EntityWorldMut) { - entity.with_related::(|s| { - s.spawn(self.bundle); - }); + entity.with_related::(self.bundle); } } @@ -244,7 +254,7 @@ impl DynamicBundle for 
SpawnOneRelated { // SAFETY: This internally relies on the RelationshipTarget's Bundle implementation, which is sound. unsafe impl Bundle for SpawnOneRelated { fn component_ids( - components: &mut crate::component::Components, + components: &mut crate::component::ComponentsRegistrator, ids: &mut impl FnMut(crate::component::ComponentId), ) { ::component_ids(components, ids); @@ -258,7 +268,7 @@ unsafe impl Bundle for SpawnOneRelated { } fn register_required_components( - components: &mut crate::component::Components, + components: &mut crate::component::ComponentsRegistrator, required_components: &mut crate::component::RequiredComponents, ) { ::register_required_components( diff --git a/crates/bevy_ecs/src/storage/blob_array.rs b/crates/bevy_ecs/src/storage/blob_array.rs index 911efde621..9b738a763c 100644 --- a/crates/bevy_ecs/src/storage/blob_array.rs +++ b/crates/bevy_ecs/src/storage/blob_array.rs @@ -76,7 +76,7 @@ impl BlobArray { /// /// # Safety /// - The element at index `index` is safe to access. - /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) + /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) /// /// [`Vec::len`]: alloc::vec::Vec::len #[inline] @@ -99,7 +99,7 @@ impl BlobArray { /// /// # Safety /// - The element with at index `index` is safe to access. - /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) + /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) /// /// [`Vec::len`]: alloc::vec::Vec::len #[inline] @@ -156,7 +156,7 @@ impl BlobArray { /// /// # Safety /// - For every element with index `i`, if `i` < `len`: It must be safe to call [`Self::get_unchecked_mut`] with `i`. - /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `len` is correct.) + /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `len` is correct.) /// /// [`Vec::clear`]: alloc::vec::Vec::clear pub unsafe fn clear(&mut self, len: usize) { @@ -256,7 +256,7 @@ impl BlobArray { new_capacity: NonZeroUsize, ) { #[cfg(debug_assertions)] - debug_assert_eq!(self.capacity, current_capacity.into()); + debug_assert_eq!(self.capacity, current_capacity.get()); if !self.is_zst() { // SAFETY: `new_capacity` can't overflow usize let new_layout = @@ -289,7 +289,7 @@ impl BlobArray { /// # Safety /// - `index` must be in bounds (`index` < capacity) /// - The [`Layout`] of the value must match the layout of the blobs stored in this array, - /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. + /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. /// - `value` must not point to the same value that is being initialized. #[inline] pub unsafe fn initialize_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { @@ -305,7 +305,7 @@ impl BlobArray { /// # Safety /// - Index must be in-bounds (`index` < `len`) /// - `value`'s [`Layout`] must match this [`BlobArray`]'s `item_layout`, - /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. 
+ /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. /// - `value` must not point to the same value that is being replaced. pub unsafe fn replace_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { #[cfg(debug_assertions)] diff --git a/crates/bevy_ecs/src/storage/blob_vec.rs b/crates/bevy_ecs/src/storage/blob_vec.rs index 971cf80fa7..2451fccb14 100644 --- a/crates/bevy_ecs/src/storage/blob_vec.rs +++ b/crates/bevy_ecs/src/storage/blob_vec.rs @@ -176,7 +176,7 @@ impl BlobVec { /// # Safety /// - index must be in bounds /// - the memory in the [`BlobVec`] starting at index `index`, of a size matching this [`BlobVec`]'s - /// `item_layout`, must have been previously allocated. + /// `item_layout`, must have been previously allocated. #[inline] pub unsafe fn initialize_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { debug_assert!(index < self.len()); @@ -189,10 +189,10 @@ impl BlobVec { /// # Safety /// - index must be in-bounds /// - the memory in the [`BlobVec`] starting at index `index`, of a size matching this - /// [`BlobVec`]'s `item_layout`, must have been previously initialized with an item matching - /// this [`BlobVec`]'s `item_layout` + /// [`BlobVec`]'s `item_layout`, must have been previously initialized with an item matching + /// this [`BlobVec`]'s `item_layout` /// - the memory at `*value` must also be previously initialized with an item matching this - /// [`BlobVec`]'s `item_layout` + /// [`BlobVec`]'s `item_layout` pub unsafe fn replace_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { debug_assert!(index < self.len()); diff --git a/crates/bevy_ecs/src/storage/resource.rs b/crates/bevy_ecs/src/storage/resource.rs index ac9bbfa54d..caa0785b79 100644 --- a/crates/bevy_ecs/src/storage/resource.rs +++ b/crates/bevy_ecs/src/storage/resource.rs @@ -1,14 +1,12 @@ use crate::{ archetype::ArchetypeComponentId, - change_detection::{MaybeLocation, MaybeUnsafeCellLocation, MutUntyped, TicksMut}, + change_detection::{MaybeLocation, MutUntyped, TicksMut}, component::{ComponentId, ComponentTicks, Components, Tick, TickCells}, storage::{blob_vec::BlobVec, SparseSet}, }; use alloc::string::String; use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref}; -#[cfg(feature = "track_location")] -use core::panic::Location; -use core::{cell::UnsafeCell, mem::ManuallyDrop}; +use core::{cell::UnsafeCell, mem::ManuallyDrop, panic::Location}; #[cfg(feature = "std")] use std::thread::ThreadId; @@ -30,8 +28,7 @@ pub struct ResourceData { id: ArchetypeComponentId, #[cfg(feature = "std")] origin_thread_id: Option, - #[cfg(feature = "track_location")] - changed_by: UnsafeCell<&'static Location<'static>>, + changed_by: MaybeLocation>>, } impl Drop for ResourceData { @@ -70,6 +67,13 @@ impl ResourceData { #[inline] fn validate_access(&self) { if SEND { + #[cfg_attr( + not(feature = "std"), + expect( + clippy::needless_return, + reason = "needless until no_std is addressed (see below)", + ) + )] return; } @@ -87,6 +91,7 @@ impl ResourceData { // TODO: Handle no_std non-send. // Currently, no_std is single-threaded only, so this is safe to ignore. // To support no_std multithreading, an alternative will be required. + // Remove the #[expect] attribute above when this is addressed. } /// Returns true if the resource is populated. 
@@ -136,7 +141,11 @@ impl ResourceData { #[inline] pub(crate) fn get_with_ticks( &self, - ) -> Option<(Ptr<'_>, TickCells<'_>, MaybeUnsafeCellLocation<'_>)> { + ) -> Option<( + Ptr<'_>, + TickCells<'_>, + MaybeLocation<&UnsafeCell<&'static Location<'static>>>, + )> { self.is_present().then(|| { self.validate_access(); ( @@ -146,10 +155,7 @@ impl ResourceData { added: &self.added_ticks, changed: &self.changed_ticks, }, - #[cfg(feature = "track_location")] - &self.changed_by, - #[cfg(not(feature = "track_location"))] - (), + self.changed_by.as_ref(), ) }) } @@ -160,15 +166,14 @@ impl ResourceData { /// If `SEND` is false, this will panic if a value is present and is not accessed from the /// original thread it was inserted in. pub(crate) fn get_mut(&mut self, last_run: Tick, this_run: Tick) -> Option> { - let (ptr, ticks, _caller) = self.get_with_ticks()?; + let (ptr, ticks, caller) = self.get_with_ticks()?; Some(MutUntyped { // SAFETY: We have exclusive access to the underlying storage. value: unsafe { ptr.assert_unique() }, // SAFETY: We have exclusive access to the underlying storage. ticks: unsafe { TicksMut::from_tick_cells(ticks, last_run, this_run) }, - #[cfg(feature = "track_location")] // SAFETY: We have exclusive access to the underlying storage. - changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }) } @@ -186,7 +191,7 @@ impl ResourceData { &mut self, value: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) { if self.is_present() { self.validate_access(); @@ -205,10 +210,11 @@ impl ResourceData { *self.added_ticks.deref_mut() = change_tick; } *self.changed_ticks.deref_mut() = change_tick; - #[cfg(feature = "track_location")] - { - *self.changed_by.deref_mut() = caller; - } + + self.changed_by + .as_ref() + .map(|changed_by| changed_by.deref_mut()) + .assign(caller); } /// Inserts a value into the resource with a pre-existing change tick. If a @@ -225,7 +231,7 @@ impl ResourceData { &mut self, value: OwningPtr<'_>, change_ticks: ComponentTicks, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) { if self.is_present() { self.validate_access(); @@ -244,10 +250,10 @@ impl ResourceData { } *self.added_ticks.deref_mut() = change_ticks.added; *self.changed_ticks.deref_mut() = change_ticks.changed; - #[cfg(feature = "track_location")] - { - *self.changed_by.deref_mut() = caller; - } + self.changed_by + .as_ref() + .map(|changed_by| changed_by.deref_mut()) + .assign(caller); } /// Removes a value from the resource, if present. @@ -267,11 +273,11 @@ impl ResourceData { // SAFETY: We've already validated that the row is present. let res = unsafe { self.data.swap_remove_and_forget_unchecked(Self::ROW) }; - // SAFETY: This function is being called through an exclusive mutable reference to Self - #[cfg(feature = "track_location")] - let caller = unsafe { *self.changed_by.deref_mut() }; - #[cfg(not(feature = "track_location"))] - let caller = (); + let caller = self + .changed_by + .as_ref() + // SAFETY: This function is being called through an exclusive mutable reference to Self + .map(|changed_by| unsafe { *changed_by.deref_mut() }); // SAFETY: This function is being called through an exclusive mutable reference to Self, which // makes it sound to read these ticks. 
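The hunks above replace `#[cfg(feature = "track_location")]` branching around `changed_by` with combinator calls on `MaybeLocation`. Below is a minimal, self-contained sketch of how the `as_mut`/`map`/`assign` shape composes. The `MaybeLocation` defined here is a simplified, `Option`-backed stand-in (an assumption for illustration, not the feature-gated type in `bevy_ecs::change_detection`), and `TrackedResource` is a hypothetical container standing in for `ResourceData`.

```rust
use core::panic::Location;

// Simplified, `Option`-backed stand-in for `MaybeLocation<T>` (an assumption
// for illustration; the real type is feature-gated and effectively zero-sized
// when location tracking is disabled).
struct MaybeLocation<T = &'static Location<'static>>(Option<T>);

impl MaybeLocation {
    #[track_caller]
    fn caller() -> Self {
        Self(Some(Location::caller()))
    }
}

impl<T> MaybeLocation<T> {
    fn new_with(f: impl FnOnce() -> T) -> Self {
        Self(Some(f()))
    }

    fn as_mut(&mut self) -> MaybeLocation<&mut T> {
        MaybeLocation(self.0.as_mut())
    }

    fn map<U>(self, f: impl FnOnce(T) -> U) -> MaybeLocation<U> {
        MaybeLocation(self.0.map(f))
    }
}

impl<'a, T> MaybeLocation<&'a mut T> {
    // Mirrors the `.assign(caller)` calls above: the write happens only when
    // a location is actually being tracked.
    fn assign(self, value: MaybeLocation<T>) {
        if let (Some(slot), Some(value)) = (self.0, value.0) {
            *slot = value;
        }
    }
}

// Hypothetical single-slot storage standing in for `ResourceData::changed_by`.
struct TrackedResource {
    changed_by: MaybeLocation,
}

impl TrackedResource {
    #[track_caller]
    fn insert(&mut self) {
        let caller = MaybeLocation::caller();
        // Same shape as `self.changed_by.as_ref().map(...).assign(caller)` in
        // the hunks above (the real code maps through an `UnsafeCell` here).
        self.changed_by
            .as_mut()
            .map(|changed_by| changed_by)
            .assign(caller);
    }
}

fn main() {
    let mut resource = TrackedResource {
        changed_by: MaybeLocation::new_with(Location::caller),
    };
    resource.insert();
    if let Some(location) = resource.changed_by.0 {
        println!("last changed at {location}");
    }
}
```

The point of this shape is that call sites compile the same way whether or not locations are tracked; the wrapper alone decides whether the write happens.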
@@ -392,8 +398,7 @@ impl Resources { id: f(), #[cfg(feature = "std")] origin_thread_id: None, - #[cfg(feature = "track_location")] - changed_by: UnsafeCell::new(Location::caller()) + changed_by: MaybeLocation::caller().map(UnsafeCell::new), } }) } diff --git a/crates/bevy_ecs/src/storage/sparse_set.rs b/crates/bevy_ecs/src/storage/sparse_set.rs index d6ea6c59ec..bb79382e06 100644 --- a/crates/bevy_ecs/src/storage/sparse_set.rs +++ b/crates/bevy_ecs/src/storage/sparse_set.rs @@ -1,14 +1,12 @@ use crate::{ - change_detection::MaybeUnsafeCellLocation, + change_detection::MaybeLocation, component::{ComponentId, ComponentInfo, ComponentTicks, Tick, TickCells}, entity::Entity, storage::{Column, TableRow}, }; use alloc::{boxed::Box, vec::Vec}; use bevy_ptr::{OwningPtr, Ptr}; -#[cfg(feature = "track_location")] -use core::panic::Location; -use core::{cell::UnsafeCell, hash::Hash, marker::PhantomData}; +use core::{cell::UnsafeCell, hash::Hash, marker::PhantomData, panic::Location}; use nonmax::NonMaxUsize; type EntityIndex = u32; @@ -170,26 +168,16 @@ impl ComponentSparseSet { entity: Entity, value: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { if let Some(&dense_index) = self.sparse.get(entity.index()) { #[cfg(debug_assertions)] assert_eq!(entity, self.entities[dense_index.as_usize()]); - self.dense.replace( - dense_index, - value, - change_tick, - #[cfg(feature = "track_location")] - caller, - ); + self.dense.replace(dense_index, value, change_tick, caller); } else { let dense_index = self.dense.len(); - self.dense.push( - value, - ComponentTicks::new(change_tick), - #[cfg(feature = "track_location")] - caller, - ); + self.dense + .push(value, ComponentTicks::new(change_tick), caller); self.sparse .insert(entity.index(), TableRow::from_usize(dense_index)); #[cfg(debug_assertions)] @@ -238,7 +226,11 @@ impl ComponentSparseSet { pub fn get_with_ticks( &self, entity: Entity, - ) -> Option<(Ptr<'_>, TickCells<'_>, MaybeUnsafeCellLocation<'_>)> { + ) -> Option<( + Ptr<'_>, + TickCells<'_>, + MaybeLocation<&UnsafeCell<&'static Location<'static>>>, + )> { let dense_index = *self.sparse.get(entity.index())?; #[cfg(debug_assertions)] assert_eq!(entity, self.entities[dense_index.as_usize()]); @@ -250,10 +242,7 @@ impl ComponentSparseSet { added: self.dense.get_added_tick_unchecked(dense_index), changed: self.dense.get_changed_tick_unchecked(dense_index), }, - #[cfg(feature = "track_location")] self.dense.get_changed_by_unchecked(dense_index), - #[cfg(not(feature = "track_location"))] - (), )) } } @@ -298,16 +287,17 @@ impl ComponentSparseSet { /// /// Returns `None` if `entity` does not have a component in the sparse set. 
#[inline] - #[cfg(feature = "track_location")] pub fn get_changed_by( &self, entity: Entity, - ) -> Option<&UnsafeCell<&'static Location<'static>>> { - let dense_index = *self.sparse.get(entity.index())?; - #[cfg(debug_assertions)] - assert_eq!(entity, self.entities[dense_index.as_usize()]); - // SAFETY: if the sparse index points to something in the dense vec, it exists - unsafe { Some(self.dense.get_changed_by_unchecked(dense_index)) } + ) -> MaybeLocation>>> { + MaybeLocation::new_with_flattened(|| { + let dense_index = *self.sparse.get(entity.index())?; + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index.as_usize()]); + // SAFETY: if the sparse index points to something in the dense vec, it exists + unsafe { Some(self.dense.get_changed_by_unchecked(dense_index)) } + }) } /// Removes the `entity` from this sparse set and returns a pointer to the associated value (if @@ -732,10 +722,10 @@ mod tests { assert_eq!(sets.len(), 0); assert!(sets.is_empty()); - init_component::(&mut sets, 1); + register_component::(&mut sets, 1); assert_eq!(sets.len(), 1); - init_component::(&mut sets, 2); + register_component::(&mut sets, 2); assert_eq!(sets.len(), 2); // check its shape by iter @@ -749,7 +739,7 @@ mod tests { vec![(ComponentId::new(1), 0), (ComponentId::new(2), 0),] ); - fn init_component(sets: &mut SparseSets, id: usize) { + fn register_component(sets: &mut SparseSets, id: usize) { let descriptor = ComponentDescriptor::new::(); let id = ComponentId::new(id); let info = ComponentInfo::new(id, descriptor); diff --git a/crates/bevy_ecs/src/storage/table/column.rs b/crates/bevy_ecs/src/storage/table/column.rs index 4054b5c15f..d4690d264c 100644 --- a/crates/bevy_ecs/src/storage/table/column.rs +++ b/crates/bevy_ecs/src/storage/table/column.rs @@ -1,10 +1,12 @@ use super::*; use crate::{ + change_detection::MaybeLocation, component::TickCells, storage::{blob_array::BlobArray, thin_array_ptr::ThinArrayPtr}, }; use alloc::vec::Vec; use bevy_ptr::PtrMut; +use core::panic::Location; /// Very similar to a normal [`Column`], but with the capacities and lengths cut out for performance reasons. /// @@ -17,8 +19,7 @@ pub struct ThinColumn { pub(super) data: BlobArray, pub(super) added_ticks: ThinArrayPtr>, pub(super) changed_ticks: ThinArrayPtr>, - #[cfg(feature = "track_location")] - pub(super) changed_by: ThinArrayPtr>>, + pub(super) changed_by: MaybeLocation>>>, } impl ThinColumn { @@ -31,8 +32,7 @@ impl ThinColumn { }, added_ticks: ThinArrayPtr::with_capacity(capacity), changed_ticks: ThinArrayPtr::with_capacity(capacity), - #[cfg(feature = "track_location")] - changed_by: ThinArrayPtr::with_capacity(capacity), + changed_by: MaybeLocation::new_with(|| ThinArrayPtr::with_capacity(capacity)), } } @@ -54,9 +54,9 @@ impl ThinColumn { .swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); self.changed_ticks .swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); - #[cfg(feature = "track_location")] - self.changed_by - .swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); + self.changed_by.as_mut().map(|changed_by| { + changed_by.swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); + }); } /// Swap-remove and drop the removed element. 
@@ -76,9 +76,9 @@ impl ThinColumn { .swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); self.changed_ticks .swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); - #[cfg(feature = "track_location")] - self.changed_by - .swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); + self.changed_by.as_mut().map(|changed_by| { + changed_by.swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); + }); } /// Swap-remove and forget the removed element. @@ -99,9 +99,9 @@ impl ThinColumn { .swap_remove_unchecked(row.as_usize(), last_element_index); self.changed_ticks .swap_remove_unchecked(row.as_usize(), last_element_index); - #[cfg(feature = "track_location")] self.changed_by - .swap_remove_unchecked(row.as_usize(), last_element_index); + .as_mut() + .map(|changed_by| changed_by.swap_remove_unchecked(row.as_usize(), last_element_index)); } /// Call [`realloc`](std::alloc::realloc) to expand / shrink the memory allocation for this [`ThinColumn`] @@ -117,8 +117,9 @@ impl ThinColumn { self.data.realloc(current_capacity, new_capacity); self.added_ticks.realloc(current_capacity, new_capacity); self.changed_ticks.realloc(current_capacity, new_capacity); - #[cfg(feature = "track_location")] - self.changed_by.realloc(current_capacity, new_capacity); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.realloc(current_capacity, new_capacity)); } /// Call [`alloc`](std::alloc::alloc) to allocate memory for this [`ThinColumn`] @@ -127,8 +128,9 @@ impl ThinColumn { self.data.alloc(new_capacity); self.added_ticks.alloc(new_capacity); self.changed_ticks.alloc(new_capacity); - #[cfg(feature = "track_location")] - self.changed_by.alloc(new_capacity); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.alloc(new_capacity)); } /// Writes component data to the column at the given row. @@ -144,7 +146,7 @@ impl ThinColumn { row: TableRow, data: OwningPtr<'_>, tick: Tick, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { self.data.initialize_unchecked(row.as_usize(), data); *self.added_ticks.get_unchecked_mut(row.as_usize()).get_mut() = tick; @@ -152,10 +154,10 @@ impl ThinColumn { .changed_ticks .get_unchecked_mut(row.as_usize()) .get_mut() = tick; - #[cfg(feature = "track_location")] - { - *self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller; - } + self.changed_by + .as_mut() + .map(|changed_by| changed_by.get_unchecked_mut(row.as_usize()).get_mut()) + .assign(caller); } /// Writes component data to the column at given row. Assumes the slot is initialized, drops the previous value. 
@@ -169,17 +171,17 @@ impl ThinColumn { row: TableRow, data: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { self.data.replace_unchecked(row.as_usize(), data); *self .changed_ticks .get_unchecked_mut(row.as_usize()) .get_mut() = change_tick; - #[cfg(feature = "track_location")] - { - *self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller; - } + self.changed_by + .as_mut() + .map(|changed_by| changed_by.get_unchecked_mut(row.as_usize()).get_mut()) + .assign(caller); } /// Removes the element from `other` at `src_row` and inserts it @@ -218,13 +220,13 @@ impl ThinColumn { .swap_remove_unchecked(src_row.as_usize(), other_last_element_index); self.changed_ticks .initialize_unchecked(dst_row.as_usize(), changed_tick); - #[cfg(feature = "track_location")] - let changed_by = other - .changed_by - .swap_remove_unchecked(src_row.as_usize(), other_last_element_index); - #[cfg(feature = "track_location")] - self.changed_by - .initialize_unchecked(dst_row.as_usize(), changed_by); + self.changed_by.as_mut().zip(other.changed_by.as_mut()).map( + |(self_changed_by, other_changed_by)| { + let changed_by = other_changed_by + .swap_remove_unchecked(src_row.as_usize(), other_last_element_index); + self_changed_by.initialize_unchecked(dst_row.as_usize(), changed_by); + }, + ); } /// Call [`Tick::check_tick`] on all of the ticks stored in this column. @@ -258,8 +260,9 @@ impl ThinColumn { self.added_ticks.clear_elements(len); self.changed_ticks.clear_elements(len); self.data.clear(len); - #[cfg(feature = "track_location")] - self.changed_by.clear_elements(len); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.clear_elements(len)); } /// Because this method needs parameters, it can't be the implementation of the `Drop` trait. @@ -273,8 +276,9 @@ impl ThinColumn { self.added_ticks.drop(cap, len); self.changed_ticks.drop(cap, len); self.data.drop(cap, len); - #[cfg(feature = "track_location")] - self.changed_by.drop(cap, len); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.drop(cap, len)); } /// Drops the last component in this column. 
@@ -285,8 +289,9 @@ impl ThinColumn { pub(crate) unsafe fn drop_last_component(&mut self, last_element_index: usize) { core::ptr::drop_in_place(self.added_ticks.get_unchecked_raw(last_element_index)); core::ptr::drop_in_place(self.changed_ticks.get_unchecked_raw(last_element_index)); - #[cfg(feature = "track_location")] - core::ptr::drop_in_place(self.changed_by.get_unchecked_raw(last_element_index)); + self.changed_by.as_mut().map(|changed_by| { + core::ptr::drop_in_place(changed_by.get_unchecked_raw(last_element_index)); + }); self.data.drop_last_element(last_element_index); } @@ -319,12 +324,13 @@ impl ThinColumn { /// /// # Safety /// - `len` must match the actual length of this column (number of elements stored) - #[cfg(feature = "track_location")] pub unsafe fn get_changed_by_slice( &self, len: usize, - ) -> &[UnsafeCell<&'static Location<'static>>] { - self.changed_by.as_slice(len) + ) -> MaybeLocation<&[UnsafeCell<&'static Location<'static>>]> { + self.changed_by + .as_ref() + .map(|changed_by| changed_by.as_slice(len)) } } @@ -343,8 +349,7 @@ pub struct Column { pub(super) data: BlobVec, pub(super) added_ticks: Vec>, pub(super) changed_ticks: Vec>, - #[cfg(feature = "track_location")] - changed_by: Vec>>, + changed_by: MaybeLocation>>>, } impl Column { @@ -356,8 +361,7 @@ impl Column { data: unsafe { BlobVec::new(component_info.layout(), component_info.drop(), capacity) }, added_ticks: Vec::with_capacity(capacity), changed_ticks: Vec::with_capacity(capacity), - #[cfg(feature = "track_location")] - changed_by: Vec::with_capacity(capacity), + changed_by: MaybeLocation::new_with(|| Vec::with_capacity(capacity)), } } @@ -378,7 +382,7 @@ impl Column { row: TableRow, data: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { debug_assert!(row.as_usize() < self.len()); self.data.replace_unchecked(row.as_usize(), data); @@ -386,10 +390,10 @@ impl Column { .changed_ticks .get_unchecked_mut(row.as_usize()) .get_mut() = change_tick; - #[cfg(feature = "track_location")] - { - *self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller; - } + self.changed_by + .as_mut() + .map(|changed_by| changed_by.get_unchecked_mut(row.as_usize()).get_mut()) + .assign(caller); } /// Gets the current number of elements stored in the column. @@ -418,8 +422,9 @@ impl Column { self.data.swap_remove_and_drop_unchecked(row.as_usize()); self.added_ticks.swap_remove(row.as_usize()); self.changed_ticks.swap_remove(row.as_usize()); - #[cfg(feature = "track_location")] - self.changed_by.swap_remove(row.as_usize()); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.swap_remove(row.as_usize())); } /// Removes an element from the [`Column`] and returns it and its change detection ticks. 
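When a row's data moves between two columns, the hunks above pair the two `changed_by` wrappers with `.zip(...)` before mapping, so the body runs only when both sides carry tracked locations. A small sketch of that shape, again with the simplified `Option`-backed stand-in (an assumption for illustration); `move_changed_by` is a hypothetical helper, not an API in this patch.

```rust
use core::panic::Location;

// Same simplified, `Option`-backed stand-in for `MaybeLocation<T>` as in the
// earlier sketch (an assumption for illustration only).
struct MaybeLocation<T = &'static Location<'static>>(Option<T>);

impl<T> MaybeLocation<T> {
    fn new_with(f: impl FnOnce() -> T) -> Self {
        Self(Some(f()))
    }

    fn as_mut(&mut self) -> MaybeLocation<&mut T> {
        MaybeLocation(self.0.as_mut())
    }

    fn map<U>(self, f: impl FnOnce(T) -> U) -> MaybeLocation<U> {
        MaybeLocation(self.0.map(f))
    }

    // Pairs two wrappers; the result is populated only if both are.
    fn zip<U>(self, other: MaybeLocation<U>) -> MaybeLocation<(T, U)> {
        MaybeLocation(self.0.zip(other.0))
    }
}

// Column-like store keeping one "last changed by" entry per row.
struct Column {
    changed_by: MaybeLocation<Vec<&'static Location<'static>>>,
}

// Hypothetical helper mirroring the `zip(...).map(...)` shape used when an
// entity's data is moved from one column to another.
fn move_changed_by(dst: &mut Column, src: &mut Column, src_row: usize) {
    dst.changed_by.as_mut().zip(src.changed_by.as_mut()).map(
        |(dst_changed_by, src_changed_by)| {
            let changed_by = src_changed_by.swap_remove(src_row);
            dst_changed_by.push(changed_by);
        },
    );
}

fn main() {
    let mut src = Column {
        changed_by: MaybeLocation::new_with(|| vec![Location::caller()]),
    };
    let mut dst = Column {
        changed_by: MaybeLocation::new_with(Vec::new),
    };
    move_changed_by(&mut dst, &mut src, 0);
    assert_eq!(dst.changed_by.0.map(|v| v.len()), Some(1));
}
```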
@@ -442,10 +447,10 @@ impl Column { let data = self.data.swap_remove_and_forget_unchecked(row.as_usize()); let added = self.added_ticks.swap_remove(row.as_usize()).into_inner(); let changed = self.changed_ticks.swap_remove(row.as_usize()).into_inner(); - #[cfg(feature = "track_location")] - let caller = self.changed_by.swap_remove(row.as_usize()).into_inner(); - #[cfg(not(feature = "track_location"))] - let caller = (); + let caller = self + .changed_by + .as_mut() + .map(|changed_by| changed_by.swap_remove(row.as_usize()).into_inner()); (data, ComponentTicks { added, changed }, caller) } @@ -457,13 +462,15 @@ impl Column { &mut self, ptr: OwningPtr<'_>, ticks: ComponentTicks, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { self.data.push(ptr); self.added_ticks.push(UnsafeCell::new(ticks.added)); self.changed_ticks.push(UnsafeCell::new(ticks.changed)); - #[cfg(feature = "track_location")] - self.changed_by.push(UnsafeCell::new(caller)); + self.changed_by + .as_mut() + .zip(caller) + .map(|(changed_by, caller)| changed_by.push(UnsafeCell::new(caller))); } /// Fetches the data pointer to the first element of the [`Column`]. @@ -644,8 +651,7 @@ impl Column { self.data.clear(); self.added_ticks.clear(); self.changed_ticks.clear(); - #[cfg(feature = "track_location")] - self.changed_by.clear(); + self.changed_by.as_mut().map(Vec::clear); } #[inline] @@ -666,9 +672,13 @@ impl Column { /// Users of this API must ensure that accesses to each individual element /// adhere to the safety invariants of [`UnsafeCell`]. #[inline] - #[cfg(feature = "track_location")] - pub fn get_changed_by(&self, row: TableRow) -> Option<&UnsafeCell<&'static Location<'static>>> { - self.changed_by.get(row.as_usize()) + pub fn get_changed_by( + &self, + row: TableRow, + ) -> MaybeLocation>>> { + self.changed_by + .as_ref() + .map(|changed_by| changed_by.get(row.as_usize())) } /// Fetches the calling location that last changed the value at `row`. @@ -678,12 +688,13 @@ impl Column { /// # Safety /// `row` must be within the range `[0, self.len())`. 
#[inline] - #[cfg(feature = "track_location")] pub unsafe fn get_changed_by_unchecked( &self, row: TableRow, - ) -> &UnsafeCell<&'static Location<'static>> { - debug_assert!(row.as_usize() < self.changed_by.len()); - self.changed_by.get_unchecked(row.as_usize()) + ) -> MaybeLocation<&UnsafeCell<&'static Location<'static>>> { + self.changed_by.as_ref().map(|changed_by| { + debug_assert!(row.as_usize() < changed_by.len()); + changed_by.get_unchecked(row.as_usize()) + }) } } diff --git a/crates/bevy_ecs/src/storage/table/mod.rs b/crates/bevy_ecs/src/storage/table/mod.rs index 4a9d795128..0f80b77f51 100644 --- a/crates/bevy_ecs/src/storage/table/mod.rs +++ b/crates/bevy_ecs/src/storage/table/mod.rs @@ -6,16 +6,15 @@ use crate::{ storage::{blob_vec::BlobVec, ImmutableSparseSet, SparseSet}, }; use alloc::{boxed::Box, vec, vec::Vec}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref}; pub use column::*; -#[cfg(feature = "track_location")] -use core::panic::Location; use core::{ alloc::Layout, cell::UnsafeCell, num::NonZeroUsize, ops::{Index, IndexMut}, + panic::Location, }; mod column; @@ -390,14 +389,15 @@ impl Table { } /// Fetches the calling locations that last changed the each component - #[cfg(feature = "track_location")] pub fn get_changed_by_slice_for( &self, component_id: ComponentId, - ) -> Option<&[UnsafeCell<&'static Location<'static>>]> { - self.get_column(component_id) - // SAFETY: `self.len()` is guaranteed to be the len of the locations array - .map(|col| unsafe { col.get_changed_by_slice(self.entity_count()) }) + ) -> MaybeLocation>]>> { + MaybeLocation::new_with_flattened(|| { + self.get_column(component_id) + // SAFETY: `self.len()` is guaranteed to be the len of the locations array + .map(|col| unsafe { col.get_changed_by_slice(self.entity_count()) }) + }) } /// Get the specific [`change tick`](Tick) of the component matching `component_id` in `row`. @@ -433,20 +433,22 @@ impl Table { } /// Get the specific calling location that changed the component matching `component_id` in `row` - #[cfg(feature = "track_location")] pub fn get_changed_by( &self, component_id: ComponentId, row: TableRow, - ) -> Option<&UnsafeCell<&'static Location<'static>>> { - (row.as_usize() < self.entity_count()).then_some( - // SAFETY: `row.as_usize()` < `len` - unsafe { - self.get_column(component_id)? - .changed_by - .get_unchecked(row.as_usize()) - }, - ) + ) -> MaybeLocation>>> { + MaybeLocation::new_with_flattened(|| { + (row.as_usize() < self.entity_count()).then_some( + // SAFETY: `row.as_usize()` < `len` + unsafe { + self.get_column(component_id)? + .changed_by + .as_ref() + .map(|changed_by| changed_by.get_unchecked(row.as_usize())) + }, + ) + }) } /// Get the [`ComponentTicks`] of the component matching `component_id` in `row`. 
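The rewritten `get_changed_by` and `get_changed_by_slice_for` above build their return values with `MaybeLocation::new_with_flattened`, which, as inferred from these call sites, runs a lookup that may find nothing and itself yields a `MaybeLocation`, then transposes the layers so "is tracking enabled?" stays on the outside and "was the row found?" moves inside. A sketch of that behavior with the same simplified stand-in follows; `SparseChangedBy` is a hypothetical container, and the exact semantics of the real constructor are an assumption based on the hunks above.

```rust
use core::panic::Location;

// Same simplified, `Option`-backed stand-in for `MaybeLocation<T>` as in the
// earlier sketches (an assumption for illustration only).
struct MaybeLocation<T = &'static Location<'static>>(Option<T>);

impl<T> MaybeLocation<T> {
    // Behavior inferred from the call sites above: run a lookup that may find
    // nothing and that itself yields a `MaybeLocation`, then transpose so the
    // outer layer means "is tracking enabled?" and the inner `Option` means
    // "was the row found?".
    fn new_with_flattened(
        f: impl FnOnce() -> Option<MaybeLocation<T>>,
    ) -> MaybeLocation<Option<T>> {
        match f() {
            // Row found and a location is tracked.
            Some(MaybeLocation(Some(value))) => MaybeLocation(Some(Some(value))),
            // Row found, but tracking is disabled.
            Some(MaybeLocation(None)) => MaybeLocation(None),
            // The lookup itself came up empty.
            None => MaybeLocation(Some(None)),
        }
    }
}

// Hypothetical sparse storage loosely modeled on `ComponentSparseSet`: a dense
// list of tracked locations plus a sparse index from entity to dense row.
struct SparseChangedBy {
    dense: MaybeLocation<Vec<&'static Location<'static>>>,
    sparse: Vec<Option<usize>>,
}

impl SparseChangedBy {
    // Mirrors the rewritten `get_changed_by`: callers first unwrap the
    // "tracking enabled?" layer, then the "does this entity have one?" layer.
    fn get_changed_by(&self, entity: usize) -> MaybeLocation<Option<&'static Location<'static>>> {
        MaybeLocation::new_with_flattened(|| {
            let dense_index = (*self.sparse.get(entity)?)?;
            Some(MaybeLocation(
                self.dense.0.as_ref().map(|dense| dense[dense_index]),
            ))
        })
    }
}

fn main() {
    let set = SparseChangedBy {
        dense: MaybeLocation(Some(vec![Location::caller()])),
        sparse: vec![Some(0), None],
    };
    // Entity 0 has a tracked location; entity 1 has no row in the set at all.
    assert!(matches!(set.get_changed_by(0), MaybeLocation(Some(Some(_)))));
    assert!(matches!(set.get_changed_by(1), MaybeLocation(Some(None))));
}
```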
@@ -571,9 +573,12 @@ impl Table { .initialize_unchecked(len, UnsafeCell::new(Tick::new(0))); col.changed_ticks .initialize_unchecked(len, UnsafeCell::new(Tick::new(0))); - #[cfg(feature = "track_location")] col.changed_by - .initialize_unchecked(len, UnsafeCell::new(Location::caller())); + .as_mut() + .zip(MaybeLocation::caller()) + .map(|(changed_by, caller)| { + changed_by.initialize_unchecked(len, UnsafeCell::new(caller)); + }); } TableRow::from_usize(len) } @@ -816,16 +821,14 @@ impl Drop for Table { #[cfg(test)] mod tests { use crate::{ - component::{Component, Components, Tick}, + change_detection::MaybeLocation, + component::{Component, ComponentIds, Components, ComponentsRegistrator, Tick}, entity::Entity, ptr::OwningPtr, storage::{TableBuilder, TableId, TableRow, Tables}, }; use alloc::vec::Vec; - #[cfg(feature = "track_location")] - use core::panic::Location; - #[derive(Component)] struct W(T); @@ -844,7 +847,11 @@ mod tests { #[test] fn table() { let mut components = Components::default(); - let component_id = components.register_component::>(); + let mut componentids = ComponentIds::default(); + // SAFETY: They are both new. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut components, &mut componentids) }; + let component_id = registrator.register_component::>(); let columns = &[component_id]; let mut table = TableBuilder::with_capacity(0, columns.len()) .add_column(components.get_info(component_id).unwrap()) @@ -860,8 +867,7 @@ mod tests { row, value_ptr, Tick::new(0), - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), ); }); }; diff --git a/crates/bevy_ecs/src/storage/thin_array_ptr.rs b/crates/bevy_ecs/src/storage/thin_array_ptr.rs index 5654b6da67..9016344029 100644 --- a/crates/bevy_ecs/src/storage/thin_array_ptr.rs +++ b/crates/bevy_ecs/src/storage/thin_array_ptr.rs @@ -87,7 +87,7 @@ impl ThinArrayPtr { /// - The caller should update their saved `capacity` value to reflect the fact that it was changed pub unsafe fn realloc(&mut self, current_capacity: NonZeroUsize, new_capacity: NonZeroUsize) { #[cfg(debug_assertions)] - assert_eq!(self.capacity, current_capacity.into()); + assert_eq!(self.capacity, current_capacity.get()); self.set_capacity(new_capacity.get()); if size_of::() != 0 { let new_layout = diff --git a/crates/bevy_ecs/src/system/adapter_system.rs b/crates/bevy_ecs/src/system/adapter_system.rs index 27e812928c..825389a307 100644 --- a/crates/bevy_ecs/src/system/adapter_system.rs +++ b/crates/bevy_ecs/src/system/adapter_system.rs @@ -1,6 +1,6 @@ use alloc::{borrow::Cow, vec::Vec}; -use super::{IntoSystem, ReadOnlySystem, System}; +use super::{IntoSystem, ReadOnlySystem, System, SystemParamValidationError}; use crate::{ schedule::InternedSystemSet, system::{input::SystemInput, SystemIn}, @@ -162,12 +162,6 @@ where }) } - #[inline] - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut crate::prelude::World) -> Self::Out { - self.func - .adapt(input, |input| self.system.run(input, world)) - } - #[inline] fn apply_deferred(&mut self, world: &mut crate::prelude::World) { self.system.apply_deferred(world); @@ -179,7 +173,10 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegate to other `System` implementations. 
unsafe { self.system.validate_param_unsafe(world) } } diff --git a/crates/bevy_ecs/src/system/builder.rs b/crates/bevy_ecs/src/system/builder.rs index a5556f123b..6261b9e355 100644 --- a/crates/bevy_ecs/src/system/builder.rs +++ b/crates/bevy_ecs/src/system/builder.rs @@ -715,9 +715,11 @@ mod tests { use crate::{ entity::Entities, prelude::{Component, Query}, + reflect::ReflectResource, system::{Local, RunSystemOnce}, }; use alloc::vec; + use bevy_reflect::{FromType, Reflect, ReflectRef}; use super::*; @@ -730,8 +732,11 @@ mod tests { #[derive(Component)] struct C; - #[derive(Resource, Default)] - struct R; + #[derive(Resource, Default, Reflect)] + #[reflect(Resource)] + struct R { + foo: usize, + } fn local_system(local: Local) -> u64 { *local @@ -1071,4 +1076,31 @@ mod tests { .build_state(&mut world) .build_system(|_r: ResMut, _fr: FilteredResourcesMut| {}); } + + #[test] + fn filtered_resource_reflect() { + let mut world = World::new(); + world.insert_resource(R { foo: 7 }); + + let system = (FilteredResourcesParamBuilder::new(|builder| { + builder.add_read::(); + }),) + .build_state(&mut world) + .build_system(|res: FilteredResources| { + let reflect_resource = >::from_type(); + let ReflectRef::Struct(reflect_struct) = + reflect_resource.reflect(res).unwrap().reflect_ref() + else { + panic!() + }; + *reflect_struct + .field("foo") + .unwrap() + .try_downcast_ref::() + .unwrap() + }); + + let output = world.run_system_once(system).unwrap(); + assert_eq!(output, 7); + } } diff --git a/crates/bevy_ecs/src/system/combinator.rs b/crates/bevy_ecs/src/system/combinator.rs index f6e696a106..2b22931ba6 100644 --- a/crates/bevy_ecs/src/system/combinator.rs +++ b/crates/bevy_ecs/src/system/combinator.rs @@ -7,7 +7,7 @@ use crate::{ prelude::World, query::Access, schedule::InternedSystemSet, - system::{input::SystemInput, SystemIn}, + system::{input::SystemInput, SystemIn, SystemParamValidationError}, world::unsafe_world_cell::UnsafeWorldCell, }; @@ -176,6 +176,7 @@ where input, // SAFETY: The world accesses for both underlying systems have been registered, // so the caller will guarantee that no other systems will conflict with `a` or `b`. + // If either system has `is_exclusive()`, then the combined system also has `is_exclusive`. // Since these closures are `!Send + !Sync + !'static`, they can never be called // in parallel, so their world accesses will not conflict with each other. // Additionally, `update_archetype_component_access` has been called, @@ -186,19 +187,6 @@ where ) } - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { - let world = world.as_unsafe_world_cell(); - Func::combine( - input, - // SAFETY: Since these closures are `!Send + !Sync + !'static`, they can never - // be called in parallel. Since mutable access to `world` only exists within - // the scope of either closure, we can be sure they will never alias one another. - |input| self.a.run(input, unsafe { world.world_mut() }), - // SAFETY: See the above safety comment. - |input| self.b.run(input, unsafe { world.world_mut() }), - ) - } - #[inline] fn apply_deferred(&mut self, world: &mut World) { self.a.apply_deferred(world); @@ -212,7 +200,10 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegate to other `System` implementations. 
unsafe { self.a.validate_param_unsafe(world) } } @@ -416,11 +407,6 @@ where self.b.run_unsafe(value, world) } - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { - let value = self.a.run(input, world); - self.b.run(value, world) - } - fn apply_deferred(&mut self, world: &mut World) { self.a.apply_deferred(world); self.b.apply_deferred(world); @@ -431,13 +417,27 @@ where self.b.queue_deferred(world); } - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { - // SAFETY: Delegate to other `System` implementations. - unsafe { self.a.validate_param_unsafe(world) } - } + /// This method uses "early out" logic: if the first system fails validation, + /// the second system is not validated. + /// + /// Because the system validation is performed upfront, this can lead to situations + /// where later systems pass validation, but fail at runtime due to changes made earlier + /// in the piped systems. + // TODO: ensure that systems are only validated just before they are run. + // Fixing this will require fundamentally rethinking how piped systems work: + // they're currently treated as a single system from the perspective of the scheduler. + // See https://github.com/bevyengine/bevy/issues/18796 + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { + // SAFETY: Delegate to the `System` implementation for `a`. + unsafe { self.a.validate_param_unsafe(world) }?; - fn validate_param(&mut self, world: &World) -> bool { - self.a.validate_param(world) && self.b.validate_param(world) + // SAFETY: Delegate to the `System` implementation for `b`. + unsafe { self.b.validate_param_unsafe(world) }?; + + Ok(()) } fn initialize(&mut self, world: &mut World) { @@ -486,3 +486,27 @@ where for<'a> B::In: SystemInput = A::Out>, { } + +#[cfg(test)] +mod tests { + + #[test] + fn exclusive_system_piping_is_possible() { + use crate::prelude::*; + + fn my_exclusive_system(_world: &mut World) -> u32 { + 1 + } + + fn out_pipe(input: In) { + assert!(input.0 == 1); + } + + let mut world = World::new(); + + let mut schedule = Schedule::default(); + schedule.add_systems(my_exclusive_system.pipe(out_pipe)); + + schedule.run(&mut world); + } +} diff --git a/crates/bevy_ecs/src/system/commands/command.rs b/crates/bevy_ecs/src/system/commands/command.rs index c9383a1ee0..af7b88edfc 100644 --- a/crates/bevy_ecs/src/system/commands/command.rs +++ b/crates/bevy_ecs/src/system/commands/command.rs @@ -1,21 +1,19 @@ -//! This module contains the definition of the [`Command`] trait, as well as -//! blanket implementations of the trait for closures. +//! Contains the definition of the [`Command`] trait, +//! as well as the blanket implementation of the trait for closures. //! //! It also contains functions that return closures for use with //! [`Commands`](crate::system::Commands). 
-#[cfg(feature = "track_location")] -use core::panic::Location; - use crate::{ bundle::{Bundle, InsertMode, NoBundleEffect}, + change_detection::MaybeLocation, entity::Entity, + error::Result, event::{Event, Events}, observer::TriggerTargets, resource::Resource, - result::{Error, Result}, schedule::ScheduleLabel, - system::{error_handler, IntoSystem, SystemId, SystemInput}, + system::{IntoSystem, SystemId, SystemInput}, world::{FromWorld, SpawnBatchIter, World}, }; @@ -65,45 +63,6 @@ where } } -/// Takes a [`Command`] that returns a Result and uses a given error handler function to convert it into -/// a [`Command`] that internally handles an error if it occurs and returns `()`. -pub trait HandleError { - /// Takes a [`Command`] that returns a Result and uses a given error handler function to convert it into - /// a [`Command`] that internally handles an error if it occurs and returns `()`. - fn handle_error_with(self, error_handler: fn(&mut World, Error)) -> impl Command; - /// Takes a [`Command`] that returns a Result and uses the default error handler function to convert it into - /// a [`Command`] that internally handles an error if it occurs and returns `()`. - fn handle_error(self) -> impl Command - where - Self: Sized, - { - self.handle_error_with(error_handler::default()) - } -} - -impl>, T, E: Into> HandleError> for C { - fn handle_error_with(self, error_handler: fn(&mut World, Error)) -> impl Command { - move |world: &mut World| match self.apply(world) { - Ok(_) => {} - Err(err) => (error_handler)(world, err.into()), - } - } -} - -impl HandleError for C { - #[inline] - fn handle_error_with(self, _error_handler: fn(&mut World, Error)) -> impl Command { - self - } - #[inline] - fn handle_error(self) -> impl Command - where - Self: Sized, - { - self - } -} - /// A [`Command`] that consumes an iterator of [`Bundles`](Bundle) to spawn a series of entities. /// /// This is more efficient than spawning the entities individually. @@ -113,15 +72,9 @@ where I: IntoIterator + Send + Sync + 'static, I::Item: Bundle, { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |world: &mut World| { - SpawnBatchIter::new( - world, - bundles_iter.into_iter(), - #[cfg(feature = "track_location")] - caller, - ); + SpawnBatchIter::new(world, bundles_iter.into_iter(), caller); } } @@ -137,15 +90,9 @@ where I: IntoIterator + Send + Sync + 'static, B: Bundle, { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |world: &mut World| -> Result { - world.try_insert_batch_with_caller( - batch, - insert_mode, - #[cfg(feature = "track_location")] - caller, - )?; + world.try_insert_batch_with_caller(batch, insert_mode, caller)?; Ok(()) } } @@ -162,14 +109,9 @@ pub fn init_resource() -> impl Command { /// A [`Command`] that inserts a [`Resource`] into the world. #[track_caller] pub fn insert_resource(resource: R) -> impl Command { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |world: &mut World| { - world.insert_resource_with_caller( - resource, - #[cfg(feature = "track_location")] - caller, - ); + world.insert_resource_with_caller(resource, caller); } } @@ -241,8 +183,10 @@ where } } -/// A [`Command`] that removes a system previously registered with -/// [`World::register_system_cached`]. 
+/// A [`Command`] that removes a system previously registered with one of the following: +/// - [`Commands::run_system_cached`](crate::system::Commands::run_system_cached) +/// - [`World::run_system_cached`] +/// - [`World::register_system_cached`] pub fn unregister_system_cached(system: S) -> impl Command where I: SystemInput + Send + 'static, @@ -267,14 +211,9 @@ pub fn run_schedule(label: impl ScheduleLabel) -> impl Command { /// A [`Command`] that sends a global [`Trigger`](crate::observer::Trigger) without any targets. #[track_caller] pub fn trigger(event: impl Event) -> impl Command { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |world: &mut World| { - world.trigger_with_caller( - event, - #[cfg(feature = "track_location")] - caller, - ); + world.trigger_with_caller(event, caller); } } @@ -283,29 +222,18 @@ pub fn trigger_targets( event: impl Event, targets: impl TriggerTargets + Send + Sync + 'static, ) -> impl Command { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |world: &mut World| { - world.trigger_targets_with_caller( - event, - targets, - #[cfg(feature = "track_location")] - caller, - ); + world.trigger_targets_with_caller(event, targets, caller); } } /// A [`Command`] that sends an arbitrary [`Event`]. #[track_caller] pub fn send_event(event: E) -> impl Command { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |world: &mut World| { let mut events = world.resource_mut::>(); - events.send_with_caller( - event, - #[cfg(feature = "track_location")] - caller, - ); + events.send_with_caller(event, caller); } } diff --git a/crates/bevy_ecs/src/system/commands/entity_command.rs b/crates/bevy_ecs/src/system/commands/entity_command.rs index e0b0bffc69..317ad8476a 100644 --- a/crates/bevy_ecs/src/system/commands/entity_command.rs +++ b/crates/bevy_ecs/src/system/commands/entity_command.rs @@ -1,5 +1,5 @@ -//! This module contains the definition of the [`EntityCommand`] trait, as well as -//! blanket implementations of the trait for closures. +//! Contains the definition of the [`EntityCommand`] trait, +//! as well as the blanket implementation of the trait for closures. //! //! It also contains functions that return closures for use with //! [`EntityCommands`](crate::system::EntityCommands). @@ -7,17 +7,15 @@ use alloc::vec::Vec; use log::info; -#[cfg(feature = "track_location")] -use core::panic::Location; - use crate::{ bundle::{Bundle, InsertMode}, + change_detection::MaybeLocation, component::{Component, ComponentId, ComponentInfo}, entity::{Entity, EntityClonerBuilder}, event::Event, - result::Result, - system::{command::HandleError, Command, IntoObserverSystem}, - world::{error::EntityFetchError, EntityWorldMut, FromWorld, World}, + relationship::RelationshipHookMode, + system::IntoObserverSystem, + world::{error::EntityMutableFetchError, EntityWorldMut, FromWorld}, }; use bevy_ptr::OwningPtr; @@ -81,62 +79,16 @@ use bevy_ptr::OwningPtr; /// } /// ``` pub trait EntityCommand: Send + 'static { - /// Executes this command for the given [`Entity`] and - /// returns a [`Result`] for error handling. + /// Executes this command for the given [`Entity`]. fn apply(self, entity: EntityWorldMut) -> Out; } -/// Passes in a specific entity to an [`EntityCommand`], resulting in a [`Command`] that -/// internally runs the [`EntityCommand`] on that entity. 
-/// -// NOTE: This is a separate trait from `EntityCommand` because "result-returning entity commands" and -// "non-result returning entity commands" require different implementations, so they cannot be automatically -// implemented. And this isn't the type of implementation that we want to thrust on people implementing -// EntityCommand. -pub trait CommandWithEntity { - /// Passes in a specific entity to an [`EntityCommand`], resulting in a [`Command`] that - /// internally runs the [`EntityCommand`] on that entity. - fn with_entity(self, entity: Entity) -> impl Command + HandleError; -} - -impl CommandWithEntity> for C { - fn with_entity( - self, - entity: Entity, - ) -> impl Command> + HandleError> - { - move |world: &mut World| -> Result<(), EntityFetchError> { - let entity = world.get_entity_mut(entity)?; - self.apply(entity); - Ok(()) - } - } -} - -impl< - C: EntityCommand>, - T, - Err: core::fmt::Debug + core::fmt::Display + Send + Sync + 'static, - > CommandWithEntity>> for C -{ - fn with_entity( - self, - entity: Entity, - ) -> impl Command>> + HandleError>> - { - move |world: &mut World| { - let entity = world.get_entity_mut(entity)?; - self.apply(entity) - .map_err(EntityCommandError::CommandFailed) - } - } -} /// An error that occurs when running an [`EntityCommand`] on a specific entity. #[derive(thiserror::Error, Debug)] pub enum EntityCommandError { /// The entity this [`EntityCommand`] tried to run on could not be fetched. #[error(transparent)] - EntityFetchError(#[from] EntityFetchError), + EntityFetchError(#[from] EntityMutableFetchError), /// An error that occurred while running the [`EntityCommand`]. #[error("{0}")] CommandFailed(E), @@ -151,43 +103,28 @@ where } } -/// An [`EntityCommand`] that adds the components in a [`Bundle`] to an entity, -/// replacing any that were already present. +/// An [`EntityCommand`] that adds the components in a [`Bundle`] to an entity. #[track_caller] -pub fn insert(bundle: impl Bundle) -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); +pub fn insert(bundle: impl Bundle, mode: InsertMode) -> impl EntityCommand { + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { - entity.insert_with_caller( - bundle, - InsertMode::Replace, - #[cfg(feature = "track_location")] - caller, - ); - } -} - -/// An [`EntityCommand`] that adds the components in a [`Bundle`] to an entity, -/// except for any that were already present. -#[track_caller] -pub fn insert_if_new(bundle: impl Bundle) -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); - move |mut entity: EntityWorldMut| { - entity.insert_with_caller( - bundle, - InsertMode::Keep, - #[cfg(feature = "track_location")] - caller, - ); + entity.insert_with_caller(bundle, mode, caller, RelationshipHookMode::Run); } } /// An [`EntityCommand`] that adds a dynamic component to an entity. +/// +/// # Safety +/// +/// - [`ComponentId`] must be from the same world as the target entity. +/// - `T` must have the same layout as the one passed during `component_id` creation. 
#[track_caller] -pub fn insert_by_id(component_id: ComponentId, value: T) -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); +pub unsafe fn insert_by_id( + component_id: ComponentId, + value: T, + mode: InsertMode, +) -> impl EntityCommand { + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { // SAFETY: // - `component_id` safety is ensured by the caller @@ -196,8 +133,9 @@ pub fn insert_by_id(component_id: ComponentId, value: T) -> i entity.insert_by_id_with_caller( component_id, ptr, - #[cfg(feature = "track_location")] + mode, caller, + RelationshipHookMode::Run, ); }); } @@ -207,29 +145,19 @@ pub fn insert_by_id(component_id: ComponentId, value: T) -> i /// the component's [`FromWorld`] implementation. #[track_caller] pub fn insert_from_world(mode: InsertMode) -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { let value = entity.world_scope(|world| T::from_world(world)); - entity.insert_with_caller( - value, - mode, - #[cfg(feature = "track_location")] - caller, - ); + entity.insert_with_caller(value, mode, caller, RelationshipHookMode::Run); } } /// An [`EntityCommand`] that removes the components in a [`Bundle`] from an entity. #[track_caller] pub fn remove() -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { - entity.remove_with_caller::( - #[cfg(feature = "track_location")] - caller, - ); + entity.remove_with_caller::(caller); } } @@ -237,40 +165,27 @@ pub fn remove() -> impl EntityCommand { /// as well as the required components for each component removed. #[track_caller] pub fn remove_with_requires() -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { - entity.remove_with_requires_with_caller::( - #[cfg(feature = "track_location")] - caller, - ); + entity.remove_with_requires_with_caller::(caller); } } /// An [`EntityCommand`] that removes a dynamic component from an entity. #[track_caller] pub fn remove_by_id(component_id: ComponentId) -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { - entity.remove_by_id_with_caller( - component_id, - #[cfg(feature = "track_location")] - caller, - ); + entity.remove_by_id_with_caller(component_id, caller); } } /// An [`EntityCommand`] that removes all components from an entity. #[track_caller] pub fn clear() -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { - entity.clear_with_caller( - #[cfg(feature = "track_location")] - caller, - ); + entity.clear_with_caller(caller); } } @@ -278,13 +193,9 @@ pub fn clear() -> impl EntityCommand { /// except for those in the given [`Bundle`]. 
#[track_caller] pub fn retain() -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { - entity.retain_with_caller::( - #[cfg(feature = "track_location")] - caller, - ); + entity.retain_with_caller::(caller); } } @@ -292,17 +203,15 @@ pub fn retain() -> impl EntityCommand { /// /// # Note /// -/// This will also despawn any [`Children`](crate::hierarchy::Children) entities, and any other [`RelationshipTarget`](crate::relationship::RelationshipTarget) that is configured -/// to despawn descendants. This results in "recursive despawn" behavior. +/// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) +/// that is configured to despawn descendants. +/// +/// For example, this will recursively despawn [`Children`](crate::hierarchy::Children). #[track_caller] pub fn despawn() -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |entity: EntityWorldMut| { - entity.despawn_with_caller( - #[cfg(feature = "track_location")] - caller, - ); + entity.despawn_with_caller(caller); } } @@ -312,14 +221,23 @@ pub fn despawn() -> impl EntityCommand { pub fn observe( observer: impl IntoObserverSystem, ) -> impl EntityCommand { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); move |mut entity: EntityWorldMut| { - entity.observe_with_caller( - observer, - #[cfg(feature = "track_location")] - caller, - ); + entity.observe_with_caller(observer, caller); + } +} + +/// An [`EntityCommand`] that sends a [`Trigger`](crate::observer::Trigger) targeting an entity. +/// +/// This will run any [`Observer`](crate::observer::Observer) of the given [`Event`] watching the entity. +#[track_caller] +pub fn trigger(event: impl Event) -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + let id = entity.id(); + entity.world_scope(|world| { + world.trigger_targets_with_caller(event, id, caller); + }); } } @@ -356,6 +274,7 @@ pub fn log_components() -> impl EntityCommand { let debug_infos: Vec<_> = entity .world() .inspect_entity(entity.id()) + .expect("Entity existence is verified before an EntityCommand is executed") .map(ComponentInfo::name) .collect(); info!("Entity {}: {debug_infos:?}", entity.id()); diff --git a/crates/bevy_ecs/src/system/commands/error_handler.rs b/crates/bevy_ecs/src/system/commands/error_handler.rs deleted file mode 100644 index 231df9ec73..0000000000 --- a/crates/bevy_ecs/src/system/commands/error_handler.rs +++ /dev/null @@ -1,61 +0,0 @@ -//! This module contains convenience functions that return simple error handlers -//! for use with [`Commands::queue_handled`](super::Commands::queue_handled) and [`EntityCommands::queue_handled`](super::EntityCommands::queue_handled). - -use crate::{result::Error, world::World}; -use log::{error, warn}; - -/// An error handler that does nothing. -pub fn silent() -> fn(&mut World, Error) { - |_, _| {} -} - -/// An error handler that accepts an error and logs it with [`warn!`]. -pub fn warn() -> fn(&mut World, Error) { - |_, error| warn!("{error}") -} - -/// An error handler that accepts an error and logs it with [`error!`]. -pub fn error() -> fn(&mut World, Error) { - |_, error| error!("{error}") -} - -/// An error handler that accepts an error and panics with the error in -/// the panic message. 
-pub fn panic() -> fn(&mut World, Error) { - |_, error| panic!("{error}") -} - -/// The default error handler. This defaults to [`panic()`]. If the -/// `configurable_error_handler` cargo feature is enabled, then -/// `GLOBAL_ERROR_HANDLER` will be used instead, enabling error handler customization. -#[cfg(not(feature = "configurable_error_handler"))] -#[inline] -pub fn default() -> fn(&mut World, Error) { - panic() -} - -/// A global error handler. This can be set at startup, as long as it is set before -/// any uses. This should generally be configured _before_ initializing the app. -/// -/// If the `configurable_error_handler` cargo feature is enabled, this will be used -/// by default. -/// -/// This should be set in the following way: -/// -/// ``` -/// # use bevy_ecs::system::error_handler::{GLOBAL_ERROR_HANDLER, warn}; -/// GLOBAL_ERROR_HANDLER.set(warn()); -/// // initialize Bevy App here -/// ``` -#[cfg(feature = "configurable_error_handler")] -pub static GLOBAL_ERROR_HANDLER: std::sync::OnceLock = - std::sync::OnceLock::new(); - -/// The default error handler. This defaults to [`panic()`]. If the -/// `configurable_error_handler` cargo feature is enabled, then -/// [`GLOBAL_ERROR_HANDLER`] will be used instead, enabling error handler customization. -#[cfg(feature = "configurable_error_handler")] -#[inline] -pub fn default() -> fn(&mut World, Error) { - *GLOBAL_ERROR_HANDLER.get_or_init(|| panic()) -} diff --git a/crates/bevy_ecs/src/system/commands/mod.rs b/crates/bevy_ecs/src/system/commands/mod.rs index 4292ccb0c5..4cb6d61bc0 100644 --- a/crates/bevy_ecs/src/system/commands/mod.rs +++ b/crates/bevy_ecs/src/system/commands/mod.rs @@ -1,6 +1,5 @@ pub mod command; pub mod entity_command; -pub mod error_handler; #[cfg(feature = "std")] mod parallel_scope; @@ -13,23 +12,22 @@ pub use parallel_scope::*; use alloc::boxed::Box; use core::marker::PhantomData; -use core::panic::Location; use log::error; use crate::{ self as bevy_ecs, bundle::{Bundle, InsertMode, NoBundleEffect}, - change_detection::Mut, + change_detection::{MaybeLocation, Mut}, component::{Component, ComponentId, Mutable}, - entity::{Entities, Entity, EntityClonerBuilder}, + entity::{Entities, Entity, EntityClonerBuilder, EntityDoesNotExistError}, + error::{ignore, warn, BevyError, CommandWithEntity, ErrorContext, HandleError}, event::Event, observer::{Observer, TriggerTargets}, resource::Resource, - result::Error, schedule::ScheduleLabel, system::{ - command::HandleError, entity_command::CommandWithEntity, input::SystemInput, Deferred, - IntoObserverSystem, IntoSystem, RegisteredSystem, SystemId, + Deferred, IntoObserverSystem, IntoSystem, RegisteredSystem, SystemId, SystemInput, + SystemParamValidationError, }, world::{ command_queue::RawCommandQueue, unsafe_world_cell::UnsafeWorldCell, CommandQueue, @@ -61,7 +59,6 @@ use crate::{ /// /// ``` /// # use bevy_ecs::prelude::*; -/// # /// fn my_system(mut commands: Commands) { /// // ... /// } @@ -83,23 +80,22 @@ use crate::{ /// // NOTE: type inference fails here, so annotations are required on the closure. /// commands.queue(|w: &mut World| { /// // Mutate the world however you want... -/// # todo!(); /// }); /// # } /// ``` /// /// # Error handling /// -/// Commands can return a [`Result`](crate::result::Result), which can be passed to -/// an error handler. Error handlers are functions/closures of the form -/// `fn(&mut World, CommandError)`. 
+/// A [`Command`] can return a [`Result`](crate::error::Result), +/// which will be passed to an [error handler](crate::error) if the `Result` is an error. /// -/// The default error handler panics. It can be configured by enabling the `configurable_error_handler` -/// cargo feature, then setting the `GLOBAL_ERROR_HANDLER`. +/// The [default error handler](crate::error::default_error_handler) panics. +/// It can be configured by setting the `GLOBAL_ERROR_HANDLER`. /// -/// Alternatively, you can customize the error handler for a specific command by calling [`Commands::queue_handled`]. +/// Alternatively, you can customize the error handler for a specific command +/// by calling [`Commands::queue_handled`]. /// -/// The [`error_handler`] module provides some simple error handlers for convenience. +/// The [`error`](crate::error) module provides some simple error handlers for convenience. /// /// [`ApplyDeferred`]: crate::schedule::ApplyDeferred pub struct Commands<'w, 's> { @@ -181,7 +177,7 @@ const _: () = { state: &Self::State, system_meta: &bevy_ecs::system::SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { <(Deferred, &Entities) as bevy_ecs::system::SystemParam>::validate_param( &state.state, system_meta, @@ -219,19 +215,11 @@ enum InternalQueue<'s> { impl<'w, 's> Commands<'w, 's> { /// Returns a new `Commands` instance from a [`CommandQueue`] and a [`World`]. - /// - /// It is not required to call this constructor when using `Commands` as a [system parameter]. - /// - /// [system parameter]: crate::system::SystemParam pub fn new(queue: &'s mut CommandQueue, world: &'w World) -> Self { Self::new_from_entities(queue, &world.entities) } /// Returns a new `Commands` instance from a [`CommandQueue`] and an [`Entities`] reference. - /// - /// It is not required to call this constructor when using `Commands` as a [system parameter]. - /// - /// [system parameter]: crate::system::SystemParam pub fn new_from_entities(queue: &'s mut CommandQueue, entities: &'w Entities) -> Self { Self { queue: InternalQueue::CommandQueue(Deferred(queue)), @@ -245,7 +233,7 @@ impl<'w, 's> Commands<'w, 's> { /// /// # Safety /// - /// * Caller ensures that `queue` must outlive 'w + /// * Caller ensures that `queue` must outlive `'w` pub(crate) unsafe fn new_raw_from_entities( queue: RawCommandQueue, entities: &'w Entities, @@ -257,9 +245,10 @@ impl<'w, 's> Commands<'w, 's> { } /// Returns a [`Commands`] with a smaller lifetime. + /// /// This is useful if you have `&mut Commands` but need `Commands`. /// - /// # Examples + /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; @@ -286,7 +275,7 @@ impl<'w, 's> Commands<'w, 's> { } } - /// Take all commands from `other` and append them to `self`, leaving `other` empty + /// Take all commands from `other` and append them to `self`, leaving `other` empty. pub fn append(&mut self, other: &mut CommandQueue) { match &mut self.queue { InternalQueue::CommandQueue(queue) => queue.bytes.append(&mut other.bytes), @@ -297,15 +286,12 @@ impl<'w, 's> Commands<'w, 's> { } } - /// Reserves a new empty [`Entity`] to be spawned, and returns its corresponding [`EntityCommands`]. - /// - /// See [`World::spawn_empty`] for more details. + /// Spawns a new empty [`Entity`] and returns its corresponding [`EntityCommands`]. 
/// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component)] /// struct Label(&'static str); /// #[derive(Component)] @@ -314,14 +300,14 @@ impl<'w, 's> Commands<'w, 's> { /// struct Agility(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create a new empty entity and retrieve its id. - /// let empty_entity = commands.spawn_empty().id(); + /// // Create a new empty entity. + /// commands.spawn_empty(); /// - /// // Create another empty entity, then add some component to it + /// // Create another empty entity. /// commands.spawn_empty() - /// // adds a new component bundle to the entity + /// // Add a new component bundle to the entity. /// .insert((Strength(1), Agility(2))) - /// // adds a single component to the entity + /// // Add a single component to the entity. /// .insert(Label("hello world")); /// } /// # bevy_ecs::system::assert_is_system(example_system); @@ -329,8 +315,9 @@ impl<'w, 's> Commands<'w, 's> { /// /// # See also /// - /// - [`spawn`](Self::spawn) to spawn an entity with a bundle. - /// - [`spawn_batch`](Self::spawn_batch) to spawn entities with a bundle each. + /// - [`spawn`](Self::spawn) to spawn an entity with components. + /// - [`spawn_batch`](Self::spawn_batch) to spawn many entities + /// with the same combination of components. pub fn spawn_empty(&mut self) -> EntityCommands { let entity = self.entities.reserve_entity(); EntityCommands { @@ -339,51 +326,39 @@ impl<'w, 's> Commands<'w, 's> { } } - /// Pushes a [`Command`] to the queue for creating a new entity with the given [`Bundle`]'s components, - /// and returns its corresponding [`EntityCommands`]. + /// Spawns a new [`Entity`] with the given components + /// and returns the entity's corresponding [`EntityCommands`]. /// - /// In case multiple bundles of the same [`Bundle`] type need to be spawned, - /// [`spawn_batch`](Self::spawn_batch) should be used for better performance. + /// To spawn many entities with the same combination of components, + /// [`spawn_batch`](Self::spawn_batch) can be used for better performance. /// /// # Example /// /// ``` - /// use bevy_ecs::prelude::*; - /// + /// # use bevy_ecs::prelude::*; /// #[derive(Component)] - /// struct Component1; + /// struct ComponentA(u32); /// #[derive(Component)] - /// struct Component2; - /// #[derive(Component)] - /// struct Label(&'static str); - /// #[derive(Component)] - /// struct Strength(u32); - /// #[derive(Component)] - /// struct Agility(u32); + /// struct ComponentB(u32); /// /// #[derive(Bundle)] /// struct ExampleBundle { - /// a: Component1, - /// b: Component2, + /// a: ComponentA, + /// b: ComponentB, /// } /// /// fn example_system(mut commands: Commands) { /// // Create a new entity with a single component. - /// commands.spawn(Component1); + /// commands.spawn(ComponentA(1)); + /// + /// // Create a new entity with two components using a "tuple bundle". + /// commands.spawn((ComponentA(2), ComponentB(1))); /// /// // Create a new entity with a component bundle. /// commands.spawn(ExampleBundle { - /// a: Component1, - /// b: Component2, + /// a: ComponentA(3), + /// b: ComponentB(2), /// }); - /// - /// commands - /// // Create a new entity with two components using a "tuple bundle". 
- /// .spawn((Component1, Component2)) - /// // `spawn returns a builder, so you can insert more bundles like this: - /// .insert((Strength(1), Agility(2))) - /// // or insert single components like this: - /// .insert(Label("hello world")); /// } /// # bevy_ecs::system::assert_is_system(example_system); /// ``` @@ -391,7 +366,8 @@ impl<'w, 's> Commands<'w, 's> { /// # See also /// /// - [`spawn_empty`](Self::spawn_empty) to spawn an entity without any components. - /// - [`spawn_batch`](Self::spawn_batch) to spawn entities with a bundle each. + /// - [`spawn_batch`](Self::spawn_batch) to spawn many entities + /// with the same combination of components. #[track_caller] pub fn spawn(&mut self, bundle: T) -> EntityCommands { let mut entity = self.spawn_empty(); @@ -399,36 +375,26 @@ impl<'w, 's> Commands<'w, 's> { entity } - /// Returns the [`EntityCommands`] for the requested [`Entity`]. + /// Returns the [`EntityCommands`] for the given [`Entity`]. /// - /// This method does not guarantee that commands queued by the `EntityCommands` + /// This method does not guarantee that commands queued by the returned `EntityCommands` /// will be successful, since the entity could be despawned before they are executed. /// - /// # Panics - /// - /// This method panics if the requested entity does not exist. - /// /// # Example /// /// ``` - /// use bevy_ecs::prelude::*; + /// # use bevy_ecs::prelude::*; + /// #[derive(Resource)] + /// struct PlayerEntity { + /// entity: Entity + /// } /// /// #[derive(Component)] /// struct Label(&'static str); - /// #[derive(Component)] - /// struct Strength(u32); - /// #[derive(Component)] - /// struct Agility(u32); /// - /// fn example_system(mut commands: Commands) { - /// // Create a new, empty entity - /// let entity = commands.spawn_empty().id(); - /// - /// commands.entity(entity) - /// // adds a new component bundle to the entity - /// .insert((Strength(1), Agility(2))) - /// // adds a single component to the entity - /// .insert(Label("hello world")); + /// fn example_system(mut commands: Commands, player: Res) { + /// // Get the entity and add a component. + /// commands.entity(player.entity).insert(Label("hello world")); /// } /// # bevy_ecs::system::assert_is_system(example_system); /// ``` @@ -439,115 +405,111 @@ impl<'w, 's> Commands<'w, 's> { #[inline] #[track_caller] pub fn entity(&mut self, entity: Entity) -> EntityCommands { - #[inline(never)] - #[cold] - #[track_caller] - fn panic_no_entity(entities: &Entities, entity: Entity) -> ! { - panic!( - "Attempting to create an EntityCommands for entity {entity}, which {}", - entities.entity_does_not_exist_error_details(entity) - ); - } - - if self.get_entity(entity).is_some() { - EntityCommands { - entity, - commands: self.reborrow(), - } - } else { - panic_no_entity(self.entities, entity) + EntityCommands { + entity, + commands: self.reborrow(), } } - /// Returns the [`EntityCommands`] for the requested [`Entity`], if it exists. + /// Returns the [`EntityCommands`] for the requested [`Entity`] if it exists. /// - /// Returns `None` if the entity does not exist. - /// - /// This method does not guarantee that commands queued by the `EntityCommands` + /// This method does not guarantee that commands queued by the returned `EntityCommands` /// will be successful, since the entity could be despawned before they are executed. /// + /// # Errors + /// + /// Returns [`EntityDoesNotExistError`] if the requested entity does not exist. 
+ /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// #[derive(Resource)] + /// struct PlayerEntity { + /// entity: Entity + /// } + /// + /// #[derive(Component)] + /// struct Label(&'static str); + /// + /// fn example_system(mut commands: Commands, player: Res) -> Result { + /// // Get the entity if it still exists and store the `EntityCommands`. + /// // If it doesn't exist, the `?` operator will propagate the returned error + /// // to the system, and the system will pass it to an error handler. + /// let mut entity_commands = commands.get_entity(player.entity)?; + /// + /// // Add a component to the entity. + /// entity_commands.insert(Label("hello world")); + /// + /// // Return from the system successfully. + /// Ok(()) + /// } + /// # bevy_ecs::system::assert_is_system(example_system); + /// ``` + /// + /// # See also + /// + /// - [`entity`](Self::entity) for the infallible version. + #[inline] + #[track_caller] + pub fn get_entity( + &mut self, + entity: Entity, + ) -> Result { + if self.entities.contains(entity) { + Ok(EntityCommands { + entity, + commands: self.reborrow(), + }) + } else { + Err(EntityDoesNotExistError::new(entity, self.entities)) + } + } + + /// Spawns multiple entities with the same combination of components, + /// based on a batch of [`Bundles`](Bundle). + /// + /// A batch can be any type that implements [`IntoIterator`] and contains bundles, + /// such as a [`Vec`](alloc::vec::Vec) or an array `[Bundle; N]`. + /// + /// This method is equivalent to iterating the batch + /// and calling [`spawn`](Self::spawn) for each bundle, + /// but is faster by pre-allocating memory and having exclusive [`World`] access. + /// /// # Example /// /// ``` /// use bevy_ecs::prelude::*; /// /// #[derive(Component)] - /// struct Label(&'static str); - /// fn example_system(mut commands: Commands) { - /// // Create a new, empty entity - /// let entity = commands.spawn_empty().id(); + /// struct Score(u32); /// - /// // Get the entity if it still exists, which it will in this case - /// if let Some(mut entity_commands) = commands.get_entity(entity) { - /// // adds a single component to the entity - /// entity_commands.insert(Label("hello world")); - /// } + /// fn example_system(mut commands: Commands) { + /// commands.spawn_batch([ + /// (Name::new("Alice"), Score(0)), + /// (Name::new("Bob"), Score(0)), + /// ]); /// } /// # bevy_ecs::system::assert_is_system(example_system); /// ``` /// /// # See also /// - /// - [`entity`](Self::entity) for the panicking version. - #[inline] + /// - [`spawn`](Self::spawn) to spawn an entity with components. + /// - [`spawn_empty`](Self::spawn_empty) to spawn an entity without components. #[track_caller] - pub fn get_entity(&mut self, entity: Entity) -> Option { - self.entities.contains(entity).then_some(EntityCommands { - entity, - commands: self.reborrow(), - }) - } - - /// Pushes a [`Command`] to the queue for creating entities with a particular [`Bundle`] type. - /// - /// `bundles_iter` is a type that can be converted into a [`Bundle`] iterator - /// (it can also be a collection). - /// - /// This method is equivalent to iterating `bundles_iter` - /// and calling [`spawn`](Self::spawn) on each bundle, - /// but it is faster due to memory pre-allocation. 
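As a quick illustration of the batch-spawning documented above, the bundles can also be collected into a `Vec` at runtime; this sketch assumes a made-up `Score` component and player count, which are not part of this change:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Score(u32);

fn spawn_players(mut commands: Commands) {
    // Any `IntoIterator` of bundles works, so the batch can be assembled at runtime.
    let batch: Vec<(Name, Score)> = (0..4)
        .map(|i| (Name::new(format!("Player {i}")), Score(0)))
        .collect();

    // A single command spawns the whole batch, pre-allocating space for the new entities.
    commands.spawn_batch(batch);
}
```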
- /// - /// # Example - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Component)] - /// # struct Name(String); - /// # #[derive(Component)] - /// # struct Score(u32); - /// # - /// # fn system(mut commands: Commands) { - /// commands.spawn_batch(vec![ - /// ( - /// Name("Alice".to_string()), - /// Score(0), - /// ), - /// ( - /// Name("Bob".to_string()), - /// Score(0), - /// ), - /// ]); - /// # } - /// # bevy_ecs::system::assert_is_system(system); - /// ``` - /// - /// # See also - /// - /// - [`spawn`](Self::spawn) to spawn an entity with a bundle. - /// - [`spawn_empty`](Self::spawn_empty) to spawn an entity without any components. - #[track_caller] - pub fn spawn_batch(&mut self, bundles_iter: I) + pub fn spawn_batch(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, I::Item: Bundle, { - self.queue(command::spawn_batch(bundles_iter)); + self.queue(command::spawn_batch(batch)); } /// Pushes a generic [`Command`] to the command queue. /// - /// If the [`Command`] returns a [`Result`], it will be handled using the [default error handler](error_handler::default). + /// If the [`Command`] returns a [`Result`], + /// it will be handled using the [default error handler](crate::error::default_error_handler). /// /// To use a custom error handler, see [`Commands::queue_handled`]. /// @@ -578,6 +540,7 @@ impl<'w, 's> Commands<'w, 's> { /// fn add_three_to_counter_system(mut commands: Commands) { /// commands.queue(AddToCounter("3".to_string())); /// } + /// /// fn add_twenty_five_to_counter_system(mut commands: Commands) { /// commands.queue(|world: &mut World| { /// let mut counter = world.get_resource_or_insert_with(Counter::default); @@ -590,8 +553,11 @@ impl<'w, 's> Commands<'w, 's> { pub fn queue + HandleError, T>(&mut self, command: C) { self.queue_internal(command.handle_error()); } - /// Pushes a generic [`Command`] to the command queue. If the command returns a [`Result`] the given - /// `error_handler` will be used to handle error cases. + + /// Pushes a generic [`Command`] to the command queue. + /// + /// If the [`Command`] returns a [`Result`], + /// the given `error_handler` will be used to handle error cases. /// /// To implicitly use the default error handler, see [`Commands::queue`]. /// @@ -606,7 +572,8 @@ impl<'w, 's> Commands<'w, 's> { /// /// ``` /// # use bevy_ecs::prelude::*; - /// # use bevy_ecs::system::error_handler; + /// use bevy_ecs::error::warn; + /// /// #[derive(Resource, Default)] /// struct Counter(u64); /// @@ -622,8 +589,9 @@ impl<'w, 's> Commands<'w, 's> { /// } /// /// fn add_three_to_counter_system(mut commands: Commands) { - /// commands.queue_handled(AddToCounter("3".to_string()), error_handler::warn()); + /// commands.queue_handled(AddToCounter("3".to_string()), warn); /// } + /// /// fn add_twenty_five_to_counter_system(mut commands: Commands) { /// commands.queue(|world: &mut World| { /// let mut counter = world.get_resource_or_insert_with(Counter::default); @@ -636,7 +604,7 @@ impl<'w, 's> Commands<'w, 's> { pub fn queue_handled + HandleError, T>( &mut self, command: C, - error_handler: fn(&mut World, Error), + error_handler: fn(BevyError, ErrorContext), ) { self.queue_internal(command.handle_error_with(error_handler)); } @@ -678,16 +646,24 @@ impl<'w, 's> Commands<'w, 's> { /// This method should generally only be used for sharing entities across apps, and only when they have a scheme /// worked out to share an ID space (which doesn't happen by default). 
#[track_caller] + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used with lots of arbitrary free entities. See #18054 on GitHub." + )] pub fn insert_or_spawn_batch(&mut self, bundles_iter: I) where I: IntoIterator + Send + Sync + 'static, B: Bundle, { - let caller = Location::caller(); + let caller = MaybeLocation::caller(); self.queue(move |world: &mut World| { + + #[expect( + deprecated, + reason = "This needs to be supported for now, and the outer item is deprecated too." + )] if let Err(invalid_entities) = world.insert_or_spawn_batch_with_caller( bundles_iter, - #[cfg(feature = "track_location")] caller, ) { error!( @@ -699,24 +675,27 @@ impl<'w, 's> Commands<'w, 's> { }); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). + /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) or an array `[(Entity, Bundle); N]`. + /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. /// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, overwriting any existing components shared by the `Bundle`. + /// This will overwrite any pre-existing components shared by the [`Bundle`] type. + /// Use [`Commands::insert_batch_if_new`] to keep the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`entity`](Self::entity) for each pair, - /// and passing the bundle to [`insert`](EntityCommands::insert), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert`](EntityCommands::insert) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// # Panics + /// # Fallible /// - /// This command panics if any of the given entities do not exist. + /// This command will fail if any of the given entities do not exist. /// - /// For the non-panicking version, see [`try_insert_batch`](Self::try_insert_batch). + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by the [default error handler](crate::error::default_error_handler). #[track_caller] pub fn insert_batch(&mut self, batch: I) where @@ -726,24 +705,28 @@ impl<'w, 's> Commands<'w, 's> { self.queue(command::insert_batch(batch, InsertMode::Replace)); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). + /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) or an array `[(Entity, Bundle); N]`. + /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. 
/// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, except for any components already present on the `Entity`. + /// This will keep any pre-existing components shared by the [`Bundle`] type + /// and discard the new values. + /// Use [`Commands::insert_batch`] to overwrite the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`entity`](Self::entity) for each pair, - /// and passing the bundle to [`insert_if_new`](EntityCommands::insert_if_new), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert_if_new`](EntityCommands::insert_if_new) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// # Panics + /// # Fallible /// - /// This command panics if any of the given entities do not exist. + /// This command will fail if any of the given entities do not exist. /// - /// For the non-panicking version, see [`try_insert_batch_if_new`](Self::try_insert_batch_if_new). + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by the [default error handler](crate::error::default_error_handler). #[track_caller] pub fn insert_batch_if_new(&mut self, batch: I) where @@ -753,83 +736,88 @@ impl<'w, 's> Commands<'w, 's> { self.queue(command::insert_batch(batch, InsertMode::Keep)); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). + /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) or an array `[(Entity, Bundle); N]`. + /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. /// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, overwriting any existing components shared by the `Bundle`. + /// This will overwrite any pre-existing components shared by the [`Bundle`] type. + /// Use [`Commands::try_insert_batch_if_new`] to keep the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`get_entity`](Self::get_entity) for each pair, - /// and passing the bundle to [`insert`](EntityCommands::insert), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert`](EntityCommands::insert) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// This command will send a warning if any of the given entities do not exist. + /// # Fallible /// - /// For the panicking version, see [`insert_batch`](Self::insert_batch). + /// This command will fail if any of the given entities do not exist. + /// + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by [logging the error at the `warn` level](warn). 
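To make the `(Entity, Bundle)` pairing concrete, here is a minimal sketch of a batch insert; the `Score` component and the query are illustrative assumptions, not part of this change:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Score(u32);

fn reset_scores(mut commands: Commands, players: Query<Entity, With<Score>>) {
    // Pair every matching entity with the bundle that should be inserted for it.
    let batch: Vec<(Entity, Score)> = players.iter().map(|entity| (entity, Score(0))).collect();

    // `insert_batch` overwrites existing `Score` values in one command.
    // `try_insert_batch` would merely log a warning if an entity had been despawned
    // by the time the command is applied.
    commands.insert_batch(batch);
}
```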
#[track_caller] pub fn try_insert_batch(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, B: Bundle, { - self.queue( - command::insert_batch(batch, InsertMode::Replace) - .handle_error_with(error_handler::warn()), - ); + self.queue(command::insert_batch(batch, InsertMode::Replace).handle_error_with(warn)); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). + /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) or an array `[(Entity, Bundle); N]`. + /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. /// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, except for any components already present on the `Entity`. + /// This will keep any pre-existing components shared by the [`Bundle`] type + /// and discard the new values. + /// Use [`Commands::try_insert_batch`] to overwrite the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`get_entity`](Self::get_entity) for each pair, - /// and passing the bundle to [`insert_if_new`](EntityCommands::insert_if_new), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert_if_new`](EntityCommands::insert_if_new) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// This command will send a warning if any of the given entities do not exist. + /// # Fallible /// - /// For the panicking version, see [`insert_batch_if_new`](Self::insert_batch_if_new). + /// This command will fail if any of the given entities do not exist. + /// + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by [logging the error at the `warn` level](warn). #[track_caller] pub fn try_insert_batch_if_new(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, B: Bundle, { - self.queue( - command::insert_batch(batch, InsertMode::Keep).handle_error_with(error_handler::warn()), - ); + self.queue(command::insert_batch(batch, InsertMode::Keep).handle_error_with(warn)); } - /// Pushes a [`Command`] to the queue for inserting a [`Resource`] in the [`World`] with an inferred value. + /// Inserts a [`Resource`] into the [`World`] with an inferred value. /// /// The inferred value is determined by the [`FromWorld`] trait of the resource. - /// When the command is applied, - /// if the resource already exists, nothing happens. + /// Note that any resource with the [`Default`] trait automatically implements [`FromWorld`], + /// and those default values will be used. /// - /// See [`World::init_resource`] for more details. + /// If the resource already exists when the command is applied, nothing happens. 
/// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Resource, Default)] - /// # struct Scoreboard { - /// # current_score: u32, - /// # high_score: u32, - /// # } - /// # - /// # fn initialize_scoreboard(mut commands: Commands) { - /// commands.init_resource::(); - /// # } + /// #[derive(Resource, Default)] + /// struct Scoreboard { + /// current_score: u32, + /// high_score: u32, + /// } + /// + /// fn initialize_scoreboard(mut commands: Commands) { + /// commands.init_resource::(); + /// } /// # bevy_ecs::system::assert_is_system(initialize_scoreboard); /// ``` #[track_caller] @@ -837,29 +825,26 @@ impl<'w, 's> Commands<'w, 's> { self.queue(command::init_resource::()); } - /// Pushes a [`Command`] to the queue for inserting a [`Resource`] in the [`World`] with a specific value. + /// Inserts a [`Resource`] into the [`World`] with a specific value. /// /// This will overwrite any previous value of the same resource type. /// - /// See [`World::insert_resource`] for more details. - /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Resource)] - /// # struct Scoreboard { - /// # current_score: u32, - /// # high_score: u32, - /// # } - /// # - /// # fn system(mut commands: Commands) { - /// commands.insert_resource(Scoreboard { - /// current_score: 0, - /// high_score: 0, - /// }); - /// # } + /// #[derive(Resource)] + /// struct Scoreboard { + /// current_score: u32, + /// high_score: u32, + /// } + /// + /// fn system(mut commands: Commands) { + /// commands.insert_resource(Scoreboard { + /// current_score: 0, + /// high_score: 0, + /// }); + /// } /// # bevy_ecs::system::assert_is_system(system); /// ``` #[track_caller] @@ -867,24 +852,21 @@ impl<'w, 's> Commands<'w, 's> { self.queue(command::insert_resource(resource)); } - /// Pushes a [`Command`] to the queue for removing a [`Resource`] from the [`World`]. - /// - /// See [`World::remove_resource`] for more details. + /// Removes a [`Resource`] from the [`World`]. /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Resource)] - /// # struct Scoreboard { - /// # current_score: u32, - /// # high_score: u32, - /// # } - /// # - /// # fn system(mut commands: Commands) { - /// commands.remove_resource::(); - /// # } + /// #[derive(Resource)] + /// struct Scoreboard { + /// current_score: u32, + /// high_score: u32, + /// } + /// + /// fn system(mut commands: Commands) { + /// commands.remove_resource::(); + /// } /// # bevy_ecs::system::assert_is_system(system); /// ``` pub fn remove_resource(&mut self) { @@ -892,55 +874,82 @@ impl<'w, 's> Commands<'w, 's> { } /// Runs the system corresponding to the given [`SystemId`]. - /// Systems are ran in an exclusive and single threaded way. - /// Running slow systems can become a bottleneck. + /// Before running a system, it must first be registered via + /// [`Commands::register_system`] or [`World::register_system`]. /// - /// Calls [`World::run_system`](World::run_system). + /// The system is run in an exclusive and single-threaded way. + /// Running slow systems can become a bottleneck. /// /// There is no way to get the output of a system when run as a command, because the /// execution of the system happens later. To get the output of a system, use /// [`World::run_system`] or [`World::run_system_with`] instead of running the system as a command. 
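Because a command cannot return the system's output, here is a small sketch of the exclusive-access alternative mentioned above; `give_answer` and the asserted value are made up for the example:

```rust
use bevy_ecs::prelude::*;

fn give_answer() -> u32 {
    42
}

// An exclusive system has direct `World` access, so it can register another
// system, run it immediately, and read its output.
fn exclusive_system(world: &mut World) {
    let id = world.register_system(give_answer);
    let answer = world.run_system(id).expect("the system was just registered");
    assert_eq!(answer, 42);
}
```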
+ /// + /// # Fallible + /// + /// This command will fail if the given [`SystemId`] + /// does not correspond to a [`System`](crate::system::System). + /// + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). pub fn run_system(&mut self, id: SystemId) { - self.queue(command::run_system(id).handle_error_with(error_handler::warn())); + self.queue(command::run_system(id).handle_error_with(warn)); } - /// Runs the system corresponding to the given [`SystemId`]. - /// Systems are ran in an exclusive and single threaded way. - /// Running slow systems can become a bottleneck. + /// Runs the system corresponding to the given [`SystemId`] with input. + /// Before running a system, it must first be registered via + /// [`Commands::register_system`] or [`World::register_system`]. /// - /// Calls [`World::run_system_with`](World::run_system_with). + /// The system is run in an exclusive and single-threaded way. + /// Running slow systems can become a bottleneck. /// /// There is no way to get the output of a system when run as a command, because the /// execution of the system happens later. To get the output of a system, use /// [`World::run_system`] or [`World::run_system_with`] instead of running the system as a command. + /// + /// # Fallible + /// + /// This command will fail if the given [`SystemId`] + /// does not correspond to a [`System`](crate::system::System). + /// + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). pub fn run_system_with(&mut self, id: SystemId, input: I::Inner<'static>) where I: SystemInput: Send> + 'static, { - self.queue(command::run_system_with(id, input).handle_error_with(error_handler::warn())); + self.queue(command::run_system_with(id, input).handle_error_with(warn)); } - /// Registers a system and returns a [`SystemId`] so it can later be called by [`World::run_system`]. - /// - /// It's possible to register the same systems more than once, they'll be stored separately. + /// Registers a system and returns its [`SystemId`] so it can later be called by + /// [`Commands::run_system`] or [`World::run_system`]. /// /// This is different from adding systems to a [`Schedule`](crate::schedule::Schedule), /// because the [`SystemId`] that is returned can be used anywhere in the [`World`] to run the associated system. - /// This allows for running systems in a push-based fashion. + /// /// Using a [`Schedule`](crate::schedule::Schedule) is still preferred for most cases /// due to its better performance and ability to run non-conflicting systems simultaneously. /// - /// If you want to prevent Commands from registering the same system multiple times, consider using [`Local`](crate::system::Local) + /// # Note + /// + /// If the same system is registered more than once, + /// each registration will be considered a different system, + /// and they will each be given their own [`SystemId`]. + /// + /// If you want to avoid registering the same system multiple times, + /// consider using [`Commands::run_system_cached`] or storing the [`SystemId`] + /// in a [`Local`](crate::system::Local). 
/// /// # Example /// /// ``` /// # use bevy_ecs::{prelude::*, world::CommandQueue, system::SystemId}; - /// /// #[derive(Resource)] /// struct Counter(i32); /// - /// fn register_system(mut local_system: Local>, mut commands: Commands) { + /// fn register_system( + /// mut commands: Commands, + /// mut local_system: Local>, + /// ) { /// if let Some(system) = *local_system { /// commands.run_system(system); /// } else { @@ -983,73 +992,122 @@ impl<'w, 's> Commands<'w, 's> { SystemId::from_entity(entity) } - /// Removes a system previously registered with [`Commands::register_system`] or [`World::register_system`]. + /// Removes a system previously registered with [`Commands::register_system`] + /// or [`World::register_system`]. /// - /// See [`World::unregister_system`] for more information. + /// After removing a system, the [`SystemId`] becomes invalid + /// and attempting to use it afterwards will result in an error. + /// Re-adding the removed system will register it with a new `SystemId`. + /// + /// # Fallible + /// + /// This command will fail if the given [`SystemId`] + /// does not correspond to a [`System`](crate::system::System). + /// + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). pub fn unregister_system(&mut self, system_id: SystemId) where I: SystemInput + Send + 'static, O: Send + 'static, { - self.queue(command::unregister_system(system_id).handle_error_with(error_handler::warn())); + self.queue(command::unregister_system(system_id).handle_error_with(warn)); } - /// Removes a system previously registered with [`World::register_system_cached`]. + /// Removes a system previously registered with one of the following: + /// - [`Commands::run_system_cached`] + /// - [`World::run_system_cached`] + /// - [`World::register_system_cached`] /// - /// See [`World::unregister_system_cached`] for more information. - pub fn unregister_system_cached< + /// # Fallible + /// + /// This command will fail if the given system + /// is not currently cached in a [`CachedSystemId`](crate::system::CachedSystemId) resource. + /// + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). + pub fn unregister_system_cached(&mut self, system: S) + where I: SystemInput + Send + 'static, O: 'static, M: 'static, S: IntoSystem + Send + 'static, - >( - &mut self, - system: S, - ) { - self.queue( - command::unregister_system_cached(system).handle_error_with(error_handler::warn()), - ); + { + self.queue(command::unregister_system_cached(system).handle_error_with(warn)); } - /// Similar to [`Self::run_system`], but caching the [`SystemId`] in a - /// [`CachedSystemId`](crate::system::CachedSystemId) resource. + /// Runs a cached system, registering it if necessary. /// - /// See [`World::register_system_cached`] for more information. - pub fn run_system_cached + Send + 'static>( - &mut self, - system: S, - ) { - self.queue(command::run_system_cached(system).handle_error_with(error_handler::warn())); + /// Unlike [`Commands::run_system`], this method does not require manual registration. + /// + /// The first time this method is called for a particular system, + /// it will register the system and store its [`SystemId`] in a + /// [`CachedSystemId`](crate::system::CachedSystemId) resource for later. 
+ /// + /// If you would rather manage the [`SystemId`] yourself, + /// or register multiple copies of the same system, + /// use [`Commands::register_system`] instead. + /// + /// # Limitations + /// + /// This method only accepts ZST (zero-sized) systems to guarantee that any two systems of + /// the same type must be equal. This means that closures that capture the environment, and + /// function pointers, are not accepted. + /// + /// If you want to access values from the environment within a system, + /// consider passing them in as inputs via [`Commands::run_system_cached_with`]. + /// + /// If that's not an option, consider [`Commands::register_system`] instead. + pub fn run_system_cached(&mut self, system: S) + where + M: 'static, + S: IntoSystem<(), (), M> + Send + 'static, + { + self.queue(command::run_system_cached(system).handle_error_with(warn)); } - /// Similar to [`Self::run_system_with`], but caching the [`SystemId`] in a - /// [`CachedSystemId`](crate::system::CachedSystemId) resource. + /// Runs a cached system with an input, registering it if necessary. /// - /// See [`World::register_system_cached`] for more information. + /// Unlike [`Commands::run_system_with`], this method does not require manual registration. + /// + /// The first time this method is called for a particular system, + /// it will register the system and store its [`SystemId`] in a + /// [`CachedSystemId`](crate::system::CachedSystemId) resource for later. + /// + /// If you would rather manage the [`SystemId`] yourself, + /// or register multiple copies of the same system, + /// use [`Commands::register_system`] instead. + /// + /// # Limitations + /// + /// This method only accepts ZST (zero-sized) systems to guarantee that any two systems of + /// the same type must be equal. This means that closures that capture the environment, and + /// function pointers, are not accepted. + /// + /// If you want to access values from the environment within a system, + /// consider passing them in as inputs. + /// + /// If that's not an option, consider [`Commands::register_system`] instead. pub fn run_system_cached_with(&mut self, system: S, input: I::Inner<'static>) where I: SystemInput: Send> + Send + 'static, M: 'static, S: IntoSystem + Send + 'static, { - self.queue( - command::run_system_cached_with(system, input).handle_error_with(error_handler::warn()), - ); + self.queue(command::run_system_cached_with(system, input).handle_error_with(warn)); } - /// Sends a "global" [`Trigger`] without any targets. This will run any [`Observer`] of the `event` that - /// isn't scoped to specific targets. + /// Sends a "global" [`Trigger`](crate::observer::Trigger) without any targets. /// - /// [`Trigger`]: crate::observer::Trigger + /// This will run any [`Observer`] of the given [`Event`] that isn't scoped to specific targets. #[track_caller] pub fn trigger(&mut self, event: impl Event) { self.queue(command::trigger(event)); } - /// Sends a [`Trigger`] for the given targets. This will run any [`Observer`] of the `event` that - /// watches those targets. + /// Sends a [`Trigger`](crate::observer::Trigger) for the given targets. /// - /// [`Trigger`]: crate::observer::Trigger + /// This will run any [`Observer`] of the given [`Event`] watching those targets. #[track_caller] pub fn trigger_targets( &mut self, @@ -1074,14 +1132,16 @@ impl<'w, 's> Commands<'w, 's> { /// Sends an arbitrary [`Event`]. /// - /// This is a convenience method for sending events without requiring an [`EventWriter`]. 
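A minimal sketch of the cached variant (`run_system_cached`) described above; the `bump_counter` system and its `Local` state are illustrative assumptions:

```rust
use bevy_ecs::prelude::*;

// A zero-sized function system; its `Local` state persists across runs
// because the cached registration is reused.
fn bump_counter(mut count: Local<u32>) {
    *count += 1;
}

fn driver(mut commands: Commands) {
    // The first call registers `bump_counter` and stores its `SystemId` in a
    // `CachedSystemId` resource; later calls reuse that id instead of re-registering.
    commands.run_system_cached(bump_counter);
}
```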
- /// ## Performance + /// This is a convenience method for sending events + /// without requiring an [`EventWriter`](crate::event::EventWriter). + /// + /// # Performance + /// /// Since this is a command, exclusive world access is used, which means that it will not profit from /// system-level parallelism on supported platforms. - /// If these events are performance-critical or very frequently - /// sent, consider using a typed [`EventWriter`] instead. /// - /// [`EventWriter`]: crate::event::EventWriter + /// If these events are performance-critical or very frequently sent, + /// consider using a typed [`EventWriter`](crate::event::EventWriter) instead. #[track_caller] pub fn send_event(&mut self, event: E) -> &mut Self { self.queue(command::send_event(event)); @@ -1092,17 +1152,21 @@ impl<'w, 's> Commands<'w, 's> { /// /// Calls [`World::try_run_schedule`](World::try_run_schedule). /// - /// This will log an error if the schedule is not available to be run. + /// # Fallible /// - /// # Examples + /// This command will fail if the given [`ScheduleLabel`] + /// does not correspond to a [`Schedule`](crate::schedule::Schedule). + /// + /// It will internally return a [`TryRunScheduleError`](crate::world::error::TryRunScheduleError), + /// which will be handled by [logging the error at the `warn` level](warn). + /// + /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_ecs::schedule::ScheduleLabel; - /// # /// # #[derive(Default, Resource)] /// # struct Counter(u32); - /// # /// #[derive(ScheduleLabel, Hash, Debug, PartialEq, Eq, Clone, Copy)] /// struct FooSchedule; /// @@ -1128,7 +1192,7 @@ impl<'w, 's> Commands<'w, 's> { /// # assert_eq!(world.resource::().0, 1); /// ``` pub fn run_schedule(&mut self, label: impl ScheduleLabel) { - self.queue(command::run_schedule(label).handle_error_with(error_handler::warn())); + self.queue(command::run_schedule(label).handle_error_with(warn)); } } @@ -1136,30 +1200,36 @@ impl<'w, 's> Commands<'w, 's> { /// /// # Note /// -/// Most [`Commands`] (and thereby [`EntityCommands`]) are deferred: when you call the command, -/// if it requires mutable access to the [`World`] (that is, if it removes, adds, or changes something), -/// it's not executed immediately. Instead, the command is added to a "command queue." -/// The command queue is applied between [`Schedules`](bevy_ecs::schedule::Schedule), one by one, -/// so that each command can have exclusive access to the World. +/// Most [`Commands`] (and thereby [`EntityCommands`]) are deferred: +/// when you call the command, if it requires mutable access to the [`World`] +/// (that is, if it removes, adds, or changes something), it's not executed immediately. +/// +/// Instead, the command is added to a "command queue." +/// The command queue is applied later +/// when the [`ApplyDeferred`](crate::schedule::ApplyDeferred) system runs. +/// Commands are executed one-by-one so that +/// each command can have exclusive access to the `World`. /// /// # Fallible /// -/// Due to their deferred nature, an entity you're trying to change with an [`EntityCommand`] can be -/// despawned by the time the command is executed. All deferred entity commands will check if the -/// entity exists at the time of execution and will return an error if it doesn't. +/// Due to their deferred nature, an entity you're trying to change with an [`EntityCommand`] +/// can be despawned by the time the command is executed. 
+/// +/// All deferred entity commands will check whether the entity exists at the time of execution +/// and will return an error if it doesn't. /// /// # Error handling /// -/// [`EntityCommands`] can return a [`Result`](crate::result::Result), which can be passed to -/// an error handler. Error handlers are functions/closures of the form -/// `fn(&mut World, CommandError)`. +/// An [`EntityCommand`] can return a [`Result`](crate::error::Result), +/// which will be passed to an [error handler](crate::error) if the `Result` is an error. /// -/// The default error handler panics. It can be configured by enabling the `configurable_error_handler` -/// cargo feature, then setting the `GLOBAL_ERROR_HANDLER`. +/// The [default error handler](crate::error::default_error_handler) panics. +/// It can be configured by setting the `GLOBAL_ERROR_HANDLER`. /// -/// Alternatively, you can customize the error handler for a specific command by calling [`EntityCommands::queue_handled`]. +/// Alternatively, you can customize the error handler for a specific command +/// by calling [`EntityCommands::queue_handled`]. /// -/// The [`error_handler`] module provides some simple error handlers for convenience. +/// The [`error`](crate::error) module provides some simple error handlers for convenience. pub struct EntityCommands<'a> { pub(crate) entity: Entity, pub(crate) commands: Commands<'a, 'a>, @@ -1185,6 +1255,7 @@ impl<'a> EntityCommands<'a> { } /// Returns an [`EntityCommands`] with a smaller lifetime. + /// /// This is useful if you have `&mut EntityCommands` but you need `EntityCommands`. pub fn reborrow(&mut self) -> EntityCommands { EntityCommands { @@ -1196,7 +1267,8 @@ impl<'a> EntityCommands<'a> { /// Get an [`EntityEntryCommands`] for the [`Component`] `T`, /// allowing you to modify it or insert it if it isn't already present. /// - /// See also [`insert_if_new`](Self::insert_if_new), which lets you insert a [`Bundle`] without overwriting it. + /// See also [`insert_if_new`](Self::insert_if_new), + /// which lets you insert a [`Bundle`] without overwriting it. /// /// # Example /// @@ -1211,9 +1283,9 @@ impl<'a> EntityCommands<'a> { /// commands /// .entity(player.entity) /// .entry::() - /// // Modify the component if it exists + /// // Modify the component if it exists. /// .and_modify(|mut lvl| lvl.0 += 1) - /// // Otherwise insert a default value + /// // Otherwise, insert a default value. /// .or_insert(Level(0)); /// } /// # bevy_ecs::system::assert_is_system(level_up_system); @@ -1230,12 +1302,6 @@ impl<'a> EntityCommands<'a> { /// This will overwrite any previous value(s) of the same component type. /// See [`EntityCommands::insert_if_new`] to keep the old value instead. /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert`] instead. - /// /// # Example /// /// ``` @@ -1279,18 +1345,13 @@ impl<'a> EntityCommands<'a> { /// ``` #[track_caller] pub fn insert(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue(entity_command::insert(bundle)) + self.queue(entity_command::insert(bundle, InsertMode::Replace)) } - /// Similar to [`Self::insert`] but will only insert if the predicate returns true. + /// Adds a [`Bundle`] of components to the entity if the predicate returns true. + /// /// This is useful for chaining method calls. /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. 
- /// - /// To avoid a panic in this case, use the command [`Self::try_insert_if`] instead. - /// /// # Example /// /// ``` @@ -1326,36 +1387,20 @@ impl<'a> EntityCommands<'a> { /// Adds a [`Bundle`] of components to the entity without overwriting. /// /// This is the same as [`EntityCommands::insert`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. + /// components will leave the old values instead of replacing them with new ones. /// /// See also [`entry`](Self::entry), which lets you modify a [`Component`] if it's present, /// as well as initialize it with a default value. - /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert_if_new`] instead. #[track_caller] pub fn insert_if_new(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue(entity_command::insert_if_new(bundle)) + self.queue(entity_command::insert(bundle, InsertMode::Keep)) } /// Adds a [`Bundle`] of components to the entity without overwriting if the /// predicate returns true. /// /// This is the same as [`EntityCommands::insert_if`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. - /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not - /// exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert_if_new`] - /// instead. + /// components will leave the old values instead of replacing them with new ones. #[track_caller] pub fn insert_if_new_and(&mut self, bundle: impl Bundle, condition: F) -> &mut Self where @@ -1368,15 +1413,11 @@ impl<'a> EntityCommands<'a> { } } - /// Adds a dynamic component to an entity. + /// Adds a dynamic [`Component`] to the entity. /// - /// See [`EntityWorldMut::insert_by_id`] for more information. + /// This will overwrite any previous value(s) of the same component type. /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert_by_id`] instead. + /// You should prefer to use the typed API [`EntityCommands::insert`] where possible. /// /// # Safety /// @@ -1388,12 +1429,24 @@ impl<'a> EntityCommands<'a> { component_id: ComponentId, value: T, ) -> &mut Self { - self.queue(entity_command::insert_by_id(component_id, value)) + self.queue( + // SAFETY: + // - `ComponentId` safety is ensured by the caller. + // - `T` safety is ensured by the caller. + unsafe { entity_command::insert_by_id(component_id, value, InsertMode::Replace) }, + ) } - /// Attempts to add a dynamic component to an entity. + /// Adds a dynamic [`Component`] to the entity. /// - /// See [`EntityWorldMut::insert_by_id`] for more information. + /// This will overwrite any previous value(s) of the same component type. + /// + /// You should prefer to use the typed API [`EntityCommands::try_insert`] where possible. + /// + /// # Note + /// + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. /// /// # Safety /// @@ -1406,18 +1459,22 @@ impl<'a> EntityCommands<'a> { value: T, ) -> &mut Self { self.queue_handled( - entity_command::insert_by_id(component_id, value), - error_handler::silent(), + // SAFETY: + // - `ComponentId` safety is ensured by the caller. + // - `T` safety is ensured by the caller. 
+ unsafe { entity_command::insert_by_id(component_id, value, InsertMode::Replace) }, + ignore, ) } - /// Tries to add a [`Bundle`] of components to the entity. + /// Adds a [`Bundle`] of components to the entity. /// /// This will overwrite any previous value(s) of the same component type. /// /// # Note /// - /// Unlike [`Self::insert`], this will not panic if the associated entity does not exist. + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. /// /// # Example /// @@ -1439,57 +1496,36 @@ impl<'a> EntityCommands<'a> { /// } /// /// fn add_combat_stats_system(mut commands: Commands, player: Res) { - /// commands.entity(player.entity) - /// // You can try_insert individual components: - /// .try_insert(Defense(10)) + /// commands.entity(player.entity) + /// // You can insert individual components: + /// .try_insert(Defense(10)) + /// // You can also insert tuples of components: + /// .try_insert(CombatBundle { + /// health: Health(100), + /// strength: Strength(40), + /// }); /// - /// // You can also insert tuples of components: - /// .try_insert(CombatBundle { - /// health: Health(100), - /// strength: Strength(40), - /// }); + /// // Suppose this occurs in a parallel adjacent system or process. + /// commands.entity(player.entity).despawn(); /// - /// // Suppose this occurs in a parallel adjacent system or process - /// commands.entity(player.entity) - /// .despawn(); - /// - /// commands.entity(player.entity) - /// // This will not panic nor will it add the component - /// .try_insert(Defense(5)); + /// // This will not panic nor will it add the component. + /// commands.entity(player.entity).try_insert(Defense(5)); /// } /// # bevy_ecs::system::assert_is_system(add_combat_stats_system); /// ``` #[track_caller] pub fn try_insert(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue_handled(entity_command::insert(bundle), error_handler::silent()) + self.queue_handled(entity_command::insert(bundle, InsertMode::Replace), ignore) } - /// Similar to [`Self::try_insert`] but will only try to insert if the predicate returns true. + /// Adds a [`Bundle`] of components to the entity if the predicate returns true. + /// /// This is useful for chaining method calls. /// - /// # Example + /// # Note /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # #[derive(Resource)] - /// # struct PlayerEntity { entity: Entity } - /// # impl PlayerEntity { fn is_spectator(&self) -> bool { true } } - /// #[derive(Component)] - /// struct StillLoadingStats; - /// #[derive(Component)] - /// struct Health(u32); - /// - /// fn add_health_system(mut commands: Commands, player: Res) { - /// commands.entity(player.entity) - /// .try_insert_if(Health(10), || !player.is_spectator()) - /// .remove::(); - /// - /// commands.entity(player.entity) - /// // This will not panic nor will it add the component - /// .try_insert_if(Health(5), || !player.is_spectator()); - /// } - /// # bevy_ecs::system::assert_is_system(add_health_system); - /// ``` + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn try_insert_if(&mut self, bundle: impl Bundle, condition: F) -> &mut Self where @@ -1502,41 +1538,16 @@ impl<'a> EntityCommands<'a> { } } - /// Tries to add a [`Bundle`] of components to the entity without overwriting if the + /// Adds a [`Bundle`] of components to the entity without overwriting if the /// predicate returns true. 
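For the conditional insertion described above (`try_insert_if`), a short sketch; the `Shield` component and the flag are assumptions for illustration:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Shield(u32);

fn equip_shield(mut commands: Commands) {
    let player = commands.spawn_empty().id();
    let shield_unlocked = true;

    // The bundle is only inserted when the predicate returns true; if `player`
    // had already been despawned, the resulting error would be ignored.
    commands
        .entity(player)
        .try_insert_if(Shield(50), || shield_unlocked);
}
```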
/// /// This is the same as [`EntityCommands::try_insert_if`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. + /// components will leave the old values instead of replacing them with new ones. /// /// # Note /// - /// Unlike [`Self::insert_if_new_and`], this will not panic if the associated entity does - /// not exist. - /// - /// # Example - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # #[derive(Resource)] - /// # struct PlayerEntity { entity: Entity } - /// # impl PlayerEntity { fn is_spectator(&self) -> bool { true } } - /// #[derive(Component)] - /// struct StillLoadingStats; - /// #[derive(Component)] - /// struct Health(u32); - /// - /// fn add_health_system(mut commands: Commands, player: Res) { - /// commands.entity(player.entity) - /// .try_insert_if(Health(10), || player.is_spectator()) - /// .remove::(); - /// - /// commands.entity(player.entity) - /// // This will not panic nor will it overwrite the component - /// .try_insert_if_new_and(Health(5), || player.is_spectator()); - /// } - /// # bevy_ecs::system::assert_is_system(add_health_system); - /// ``` + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn try_insert_if_new_and(&mut self, bundle: impl Bundle, condition: F) -> &mut Self where @@ -1549,30 +1560,31 @@ impl<'a> EntityCommands<'a> { } } - /// Tries to add a [`Bundle`] of components to the entity without overwriting. + /// Adds a [`Bundle`] of components to the entity without overwriting. /// /// This is the same as [`EntityCommands::try_insert`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. + /// components will leave the old values instead of replacing them with new ones. /// /// # Note /// - /// Unlike [`Self::insert_if_new`], this will not panic if the associated entity does not exist. + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn try_insert_if_new(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue_handled( - entity_command::insert_if_new(bundle), - error_handler::silent(), - ) + self.queue_handled(entity_command::insert(bundle, InsertMode::Keep), ignore) } /// Removes a [`Bundle`] of components from the entity. /// + /// This will remove all components that intersect with the provided bundle; + /// the entity does not need to have all the components in the bundle. + /// + /// This will emit a warning if the entity does not exist. + /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # /// # #[derive(Resource)] /// # struct PlayerEntity { entity: Entity } /// #[derive(Component)] @@ -1593,7 +1605,7 @@ impl<'a> EntityCommands<'a> { /// .entity(player.entity) /// // You can remove individual components: /// .remove::() - /// // You can also remove pre-defined Bundles of components: + /// // You can also remove pre-defined bundles of components: /// .remove::() /// // You can also remove tuples of components and bundles. 
/// // This is equivalent to the calls above: @@ -1602,24 +1614,22 @@ impl<'a> EntityCommands<'a> { /// # bevy_ecs::system::assert_is_system(remove_combat_stats_system); /// ``` #[track_caller] - pub fn remove(&mut self) -> &mut Self - where - T: Bundle, - { - self.queue_handled(entity_command::remove::(), error_handler::warn()) + pub fn remove(&mut self) -> &mut Self { + self.queue_handled(entity_command::remove::(), warn) } /// Removes a [`Bundle`] of components from the entity. /// - /// # Note + /// This will remove all components that intersect with the provided bundle; + /// the entity does not need to have all the components in the bundle. /// - /// Unlike [`Self::remove`], this will not panic if the associated entity does not exist. + /// Unlike [`Self::remove`], + /// this will not emit a warning if the entity does not exist. /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # /// # #[derive(Resource)] /// # struct PlayerEntity { entity: Entity } /// #[derive(Component)] @@ -1640,7 +1650,7 @@ impl<'a> EntityCommands<'a> { /// .entity(player.entity) /// // You can remove individual components: /// .try_remove::() - /// // You can also remove pre-defined Bundles of components: + /// // You can also remove pre-defined bundles of components: /// .try_remove::() /// // You can also remove tuples of components and bundles. /// // This is equivalent to the calls above: @@ -1648,40 +1658,40 @@ impl<'a> EntityCommands<'a> { /// } /// # bevy_ecs::system::assert_is_system(remove_combat_stats_system); /// ``` - pub fn try_remove(&mut self) -> &mut Self - where - T: Bundle, - { - self.queue_handled(entity_command::remove::(), error_handler::silent()) + pub fn try_remove(&mut self) -> &mut Self { + self.queue_handled(entity_command::remove::(), ignore) } - /// Removes all components in the [`Bundle`] components and remove all required components for each component in the [`Bundle`] from entity. + /// Removes a [`Bundle`] of components from the entity, + /// and also removes any components required by the components in the bundle. + /// + /// This will remove all components that intersect with the provided bundle; + /// the entity does not need to have all the components in the bundle. /// /// # Example /// /// ``` - /// use bevy_ecs::prelude::*; - /// + /// # use bevy_ecs::prelude::*; + /// # #[derive(Resource)] + /// # struct PlayerEntity { entity: Entity } + /// # /// #[derive(Component)] /// #[require(B)] /// struct A; /// #[derive(Component, Default)] /// struct B; /// - /// #[derive(Resource)] - /// struct PlayerEntity { entity: Entity } - /// /// fn remove_with_requires_system(mut commands: Commands, player: Res) { /// commands /// .entity(player.entity) - /// // Remove both A and B components from the entity, because B is required by A + /// // Removes both A and B from the entity, because B is required by A. /// .remove_with_requires::(); /// } /// # bevy_ecs::system::assert_is_system(remove_with_requires_system); /// ``` #[track_caller] - pub fn remove_with_requires(&mut self) -> &mut Self { - self.queue(entity_command::remove_with_requires::()) + pub fn remove_with_requires(&mut self) -> &mut Self { + self.queue(entity_command::remove_with_requires::()) } /// Removes a dynamic [`Component`] from the entity if it exists. @@ -1704,33 +1714,31 @@ impl<'a> EntityCommands<'a> { /// /// This will emit a warning if the entity does not exist. /// - /// See [`World::despawn`] for more details. 
- /// /// # Note /// - /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) that is configured - /// to despawn descendants. For example, this will recursively despawn [`Children`](crate::hierarchy::Children). + /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) + /// that is configured to despawn descendants. + /// + /// For example, this will recursively despawn [`Children`](crate::hierarchy::Children). /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # /// # #[derive(Resource)] /// # struct CharacterToRemove { entity: Entity } /// # /// fn remove_character_system( /// mut commands: Commands, /// character_to_remove: Res - /// ) - /// { + /// ) { /// commands.entity(character_to_remove.entity).despawn(); /// } /// # bevy_ecs::system::assert_is_system(remove_character_system); /// ``` #[track_caller] pub fn despawn(&mut self) { - self.queue_handled(entity_command::despawn(), error_handler::warn()); + self.queue_handled(entity_command::despawn(), warn); } /// Despawns the provided entity and its descendants. #[deprecated( @@ -1743,20 +1751,26 @@ impl<'a> EntityCommands<'a> { /// Despawns the entity. /// - /// This will not emit a warning if the entity does not exist, essentially performing - /// the same function as [`Self::despawn`] without emitting warnings. + /// Unlike [`Self::despawn`], + /// this will not emit a warning if the entity does not exist. /// /// # Note /// - /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) that are configured - /// to despawn descendants. For example, this will recursively despawn [`Children`](crate::hierarchy::Children). + /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) + /// that is configured to despawn descendants. + /// + /// For example, this will recursively despawn [`Children`](crate::hierarchy::Children). pub fn try_despawn(&mut self) { - self.queue_handled(entity_command::despawn(), error_handler::silent()); + self.queue_handled(entity_command::despawn(), ignore); } - /// Pushes an [`EntityCommand`] to the queue, which will get executed for the current [`Entity`]. + /// Pushes an [`EntityCommand`] to the queue, + /// which will get executed for the current [`Entity`]. /// - /// If the [`EntityCommand`] returns a [`Result`], it will be handled using the [default error handler](error_handler::default). + /// The [default error handler](crate::error::default_error_handler) + /// will be used to handle error cases. + /// Every [`EntityCommand`] checks whether the entity exists at the time of execution + /// and returns an error if it does not. /// /// To use a custom error handler, see [`EntityCommands::queue_handled`]. /// @@ -1767,7 +1781,7 @@ impl<'a> EntityCommands<'a> { /// - [`(EntityWorldMut)`](EntityWorldMut) `->` [`Result`] /// - A built-in command from the [`entity_command`] module. /// - /// # Examples + /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; @@ -1789,8 +1803,12 @@ impl<'a> EntityCommands<'a> { self } - /// Pushes an [`EntityCommand`] to the queue, which will get executed for the current [`Entity`]. - /// If the command returns a [`Result`] the given `error_handler` will be used to handle error cases. + /// Pushes an [`EntityCommand`] to the queue, + /// which will get executed for the current [`Entity`]. 
+ /// + /// The given `error_handler` will be used to handle error cases. + /// Every [`EntityCommand`] checks whether the entity exists at the time of execution + /// and returns an error if it does not. /// /// To implicitly use the default error handler, see [`EntityCommands::queue`]. /// @@ -1801,12 +1819,13 @@ impl<'a> EntityCommands<'a> { /// - [`(EntityWorldMut)`](EntityWorldMut) `->` [`Result`] /// - A built-in command from the [`entity_command`] module. /// - /// # Examples + /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # use bevy_ecs::system::error_handler; /// # fn my_system(mut commands: Commands) { + /// use bevy_ecs::error::warn; + /// /// commands /// .spawn_empty() /// // Closures with this signature implement `EntityCommand`. @@ -1816,7 +1835,7 @@ impl<'a> EntityCommands<'a> { /// println!("Successfully parsed the value {} for entity {}", value, entity.id()); /// Ok(()) /// }, - /// error_handler::warn() + /// warn /// ); /// # } /// # bevy_ecs::system::assert_is_system(my_system); @@ -1824,7 +1843,7 @@ impl<'a> EntityCommands<'a> { pub fn queue_handled + CommandWithEntity, T, M>( &mut self, command: C, - error_handler: fn(&mut World, Error), + error_handler: fn(BevyError, ErrorContext), ) -> &mut Self { self.commands .queue_handled(command.with_entity(self.entity), error_handler); @@ -1833,13 +1852,10 @@ impl<'a> EntityCommands<'a> { /// Removes all components except the given [`Bundle`] from the entity. /// - /// This can also be used to remove all the components from the entity by passing it an empty Bundle. - /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # /// # #[derive(Resource)] /// # struct PlayerEntity { entity: Entity } /// #[derive(Component)] @@ -1859,28 +1875,19 @@ impl<'a> EntityCommands<'a> { /// commands /// .entity(player.entity) /// // You can retain a pre-defined Bundle of components, - /// // with this removing only the Defense component + /// // with this removing only the Defense component. /// .retain::() - /// // You can also retain only a single component - /// .retain::() - /// // And you can remove all the components by passing in an empty Bundle - /// .retain::<()>(); + /// // You can also retain only a single component. + /// .retain::(); /// } /// # bevy_ecs::system::assert_is_system(remove_combat_stats_system); /// ``` #[track_caller] - pub fn retain(&mut self) -> &mut Self - where - T: Bundle, - { - self.queue(entity_command::retain::()) + pub fn retain(&mut self) -> &mut Self { + self.queue(entity_command::retain::()) } - /// Logs the components of the entity at the info level. - /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. + /// Logs the components of the entity at the [`info`](log::info) level. pub fn log_components(&mut self) -> &mut Self { self.queue(entity_command::log_components()) } @@ -1895,13 +1902,12 @@ impl<'a> EntityCommands<'a> { &mut self.commands } - /// Sends a [`Trigger`] targeting this entity. This will run any [`Observer`] of the `event` that - /// watches this entity. + /// Sends a [`Trigger`](crate::observer::Trigger) targeting the entity. /// - /// [`Trigger`]: crate::observer::Trigger + /// This will run any [`Observer`] of the given [`Event`] watching this entity. 
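To illustrate the handler signature used by `queue_handled` above, here is a hedged sketch (not part of the diff) of a hand-rolled handler; `log_and_continue` and `parse_value` are made-up names, and the handler deliberately ignores `ErrorContext`, whose fields are not shown in this diff.

```rust
use bevy_ecs::error::{BevyError, ErrorContext, Result};
use bevy_ecs::prelude::*;

// Matches the `fn(BevyError, ErrorContext)` signature expected by `queue_handled`.
fn log_and_continue(error: BevyError, _context: ErrorContext) {
    eprintln!("entity command failed: {error}");
}

fn parse_value(mut commands: Commands) {
    commands.spawn_empty().queue_handled(
        |entity: EntityWorldMut| -> Result {
            let value: usize = "100".parse()?;
            println!("parsed {value} for entity {}", entity.id());
            Ok(())
        },
        log_and_continue,
    );
}
```

The built-in `warn` and `ignore` handlers used elsewhere in this diff have the same shape, so a custom handler is a drop-in replacement.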
+ #[track_caller] pub fn trigger(&mut self, event: impl Event) -> &mut Self { - self.commands.trigger_targets(event, self.entity); - self + self.queue(entity_command::trigger(event)) } /// Creates an [`Observer`] listening for events of type `E` targeting this entity. @@ -1927,20 +1933,19 @@ impl<'a> EntityCommands<'a> { /// Configure through [`EntityClonerBuilder`] as follows: /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component, Clone)] /// struct ComponentA(u32); /// #[derive(Component, Clone)] /// struct ComponentB(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create an empty entity + /// // Create an empty entity. /// let target = commands.spawn_empty().id(); /// - /// // Create a new entity and keep its EntityCommands + /// // Create a new entity and keep its EntityCommands. /// let mut entity = commands.spawn((ComponentA(10), ComponentB(20))); /// - /// // Clone only ComponentA onto the target + /// // Clone only ComponentA onto the target. /// entity.clone_with(target, |builder| { /// builder.deny::(); /// }); @@ -1974,17 +1979,16 @@ impl<'a> EntityCommands<'a> { /// /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component, Clone)] /// struct ComponentA(u32); /// #[derive(Component, Clone)] /// struct ComponentB(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create a new entity and keep its EntityCommands + /// // Create a new entity and store its EntityCommands. /// let mut entity = commands.spawn((ComponentA(10), ComponentB(20))); /// - /// // Create a clone of the first entity + /// // Create a clone of the first entity. /// let mut entity_clone = entity.clone_and_spawn(); /// } /// # bevy_ecs::system::assert_is_system(example_system); @@ -2013,17 +2017,16 @@ impl<'a> EntityCommands<'a> { /// /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component, Clone)] /// struct ComponentA(u32); /// #[derive(Component, Clone)] /// struct ComponentB(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create a new entity and keep its EntityCommands + /// // Create a new entity and store its EntityCommands. /// let mut entity = commands.spawn((ComponentA(10), ComponentB(20))); /// - /// // Create a clone of the first entity, but without ComponentB + /// // Create a clone of the first entity, but without ComponentB. /// let mut entity_clone = entity.clone_and_spawn_with(|builder| { /// builder.deny::(); /// }); @@ -2087,61 +2090,48 @@ impl<'a, T: Component> EntityEntryCommands<'a, T> { } impl<'a, T: Component> EntityEntryCommands<'a, T> { - /// [Insert](EntityCommands::insert) `default` into this entity, if `T` is not already present. - /// - /// See also [`or_insert_with`](Self::or_insert_with). - /// - /// # Panics - /// - /// Panics if the entity does not exist. - /// See [`or_try_insert`](Self::or_try_insert) for a non-panicking version. + /// [Insert](EntityCommands::insert) `default` into this entity, + /// if `T` is not already present. #[track_caller] pub fn or_insert(&mut self, default: T) -> &mut Self { self.entity_commands.insert_if_new(default); self } - /// [Insert](EntityCommands::insert) `default` into this entity, if `T` is not already present. + /// [Insert](EntityCommands::insert) `default` into this entity, + /// if `T` is not already present. /// - /// Unlike [`or_insert`](Self::or_insert), this will not panic if the entity does not exist. + /// # Note /// - /// See also [`or_insert_with`](Self::or_insert_with). 
+ /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn or_try_insert(&mut self, default: T) -> &mut Self { self.entity_commands.try_insert_if_new(default); self } - /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, if `T` is not already present. - /// - /// See also [`or_insert`](Self::or_insert) and [`or_try_insert`](Self::or_try_insert). - /// - /// # Panics - /// - /// Panics if the entity does not exist. - /// See [`or_try_insert_with`](Self::or_try_insert_with) for a non-panicking version. + /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, + /// if `T` is not already present. #[track_caller] pub fn or_insert_with(&mut self, default: impl Fn() -> T) -> &mut Self { self.or_insert(default()) } - /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, if `T` is not already present. + /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, + /// if `T` is not already present. /// - /// Unlike [`or_insert_with`](Self::or_insert_with), this will not panic if the entity does not exist. + /// # Note /// - /// See also [`or_insert`](Self::or_insert) and [`or_try_insert`](Self::or_try_insert). + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn or_try_insert_with(&mut self, default: impl Fn() -> T) -> &mut Self { self.or_try_insert(default()) } - /// [Insert](EntityCommands::insert) `T::default` into this entity, if `T` is not already present. - /// - /// See also [`or_insert`](Self::or_insert) and [`or_from_world`](Self::or_from_world). - /// - /// # Panics - /// - /// Panics if the entity does not exist. + /// [Insert](EntityCommands::insert) `T::default` into this entity, + /// if `T` is not already present. #[track_caller] pub fn or_default(&mut self) -> &mut Self where @@ -2150,13 +2140,8 @@ impl<'a, T: Component> EntityEntryCommands<'a, T> { self.or_insert(T::default()) } - /// [Insert](EntityCommands::insert) `T::from_world` into this entity, if `T` is not already present. - /// - /// See also [`or_insert`](Self::or_insert) and [`or_default`](Self::or_default). - /// - /// # Panics - /// - /// Panics if the entity does not exist. + /// [Insert](EntityCommands::insert) `T::from_world` into this entity, + /// if `T` is not already present. #[track_caller] pub fn or_from_world(&mut self) -> &mut Self where @@ -2184,13 +2169,13 @@ impl<'a, T: Component> EntityEntryCommands<'a, T> { /// commands /// .entity(player.entity) /// .entry::() - /// // Modify the component if it exists + /// // Modify the component if it exists. /// .and_modify(|mut lvl| lvl.0 += 1) - /// // Otherwise insert a default value + /// // Otherwise, insert a default value. /// .or_insert(Level(0)) - /// // Return the EntityCommands for the entity + /// // Return the EntityCommands for the entity. /// .entity() - /// // And continue chaining method calls + /// // Continue chaining method calls. 
/// .insert(Name::new("Player")); /// } /// # bevy_ecs::system::assert_is_system(level_up_system); @@ -2203,7 +2188,7 @@ impl<'a, T: Component> EntityEntryCommands<'a, T> { #[cfg(test)] mod tests { use crate::{ - component::{require, Component}, + component::Component, resource::Resource, system::Commands, world::{CommandQueue, FromWorld, World}, diff --git a/crates/bevy_ecs/src/system/commands/parallel_scope.rs b/crates/bevy_ecs/src/system/commands/parallel_scope.rs index 2f471c13c5..bee491017d 100644 --- a/crates/bevy_ecs/src/system/commands/parallel_scope.rs +++ b/crates/bevy_ecs/src/system/commands/parallel_scope.rs @@ -20,9 +20,13 @@ struct ParallelCommandQueue { /// [`Bundle`](crate::prelude::Bundle) type need to be spawned, consider using /// [`Commands::spawn_batch`] for better performance. /// -/// Note: Because command application order will depend on how many threads are ran, non-commutative commands may result in non-deterministic results. +/// # Note +/// +/// Because command application order will depend on how many threads are ran, +/// non-commutative commands may result in non-deterministic results. +/// +/// # Example /// -/// Example: /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_tasks::ComputeTaskPool; diff --git a/crates/bevy_ecs/src/system/exclusive_function_system.rs b/crates/bevy_ecs/src/system/exclusive_function_system.rs index 2b1c081d51..15027d2aef 100644 --- a/crates/bevy_ecs/src/system/exclusive_function_system.rs +++ b/crates/bevy_ecs/src/system/exclusive_function_system.rs @@ -14,6 +14,8 @@ use alloc::{borrow::Cow, vec, vec::Vec}; use core::marker::PhantomData; use variadics_please::all_tuples; +use super::SystemParamValidationError; + /// A function system that runs with exclusive [`World`] access. /// /// You get this by calling [`IntoSystem::into_system`] on a function that only accepts @@ -111,13 +113,11 @@ where #[inline] unsafe fn run_unsafe( &mut self, - _input: SystemIn<'_, Self>, - _world: UnsafeWorldCell, + input: SystemIn<'_, Self>, + world: UnsafeWorldCell, ) -> Self::Out { - panic!("Cannot run exclusive systems with a shared World reference"); - } - - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { + // SAFETY: The safety is upheld by the caller. + let world = unsafe { world.world_mut() }; world.last_change_tick_scope(self.system_meta.last_run, |world| { #[cfg(feature = "trace")] let _span_guard = self.system_meta.system_span.enter(); @@ -150,9 +150,12 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, _world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + _world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // All exclusive system params are always available. - true + Ok(()) } #[inline] @@ -281,6 +284,7 @@ macro_rules! impl_exclusive_system_function { // without using this function. It fails to recognize that `func` // is a function, potentially because of the multiple impls of `FnMut` fn call_inner( + _: PhantomData, mut f: impl FnMut(In::Param<'_>, &mut World, $($param,)*) -> Out, input: In::Inner<'_>, world: &mut World, @@ -289,7 +293,7 @@ macro_rules! 
impl_exclusive_system_function { f(In::wrap(input), world, $($param,)*) } let ($($param,)*) = param_value; - call_inner(self, input, world, $($param),*) + call_inner(PhantomData::, self, input, world, $($param),*) } } }; diff --git a/crates/bevy_ecs/src/system/function_system.rs b/crates/bevy_ecs/src/system/function_system.rs index 0f3950d1d4..b0bbe187ed 100644 --- a/crates/bevy_ecs/src/system/function_system.rs +++ b/crates/bevy_ecs/src/system/function_system.rs @@ -18,7 +18,7 @@ use variadics_please::all_tuples; #[cfg(feature = "trace")] use tracing::{info_span, Span}; -use super::{IntoSystem, ReadOnlySystem, SystemParamBuilder}; +use super::{IntoSystem, ReadOnlySystem, SystemParamBuilder, SystemParamValidationError}; /// The metadata of a [`System`]. #[derive(Clone)] @@ -43,7 +43,6 @@ pub struct SystemMeta { is_send: bool, has_deferred: bool, pub(crate) last_run: Tick, - param_warn_policy: ParamWarnPolicy, #[cfg(feature = "trace")] pub(crate) system_span: Span, #[cfg(feature = "trace")] @@ -60,7 +59,6 @@ impl SystemMeta { is_send: true, has_deferred: false, last_run: Tick::new(0), - param_warn_policy: ParamWarnPolicy::Panic, #[cfg(feature = "trace")] system_span: info_span!("system", name = name), #[cfg(feature = "trace")] @@ -116,27 +114,6 @@ impl SystemMeta { self.has_deferred = true; } - /// Changes the warn policy. - #[inline] - pub(crate) fn set_param_warn_policy(&mut self, warn_policy: ParamWarnPolicy) { - self.param_warn_policy = warn_policy; - } - - /// Advances the warn policy after validation failed. - #[inline] - pub(crate) fn advance_param_warn_policy(&mut self) { - self.param_warn_policy.advance(); - } - - /// Emits a warning about inaccessible system param if policy allows it. - #[inline] - pub fn try_warn_param
<P>(&self) - where - P: SystemParam, - { - self.param_warn_policy.try_warn::<P>
(&self.name); - } - /// Archetype component access that is used to determine which systems can run in parallel with each other /// in the multithreaded executor. /// @@ -187,83 +164,6 @@ impl SystemMeta { } } -/// State machine for emitting warnings when [system params are invalid](System::validate_param). -#[derive(Clone, Copy)] -pub enum ParamWarnPolicy { - /// Stop app with a panic. - Panic, - /// No warning should ever be emitted. - Never, - /// The warning will be emitted once and status will update to [`Self::Never`]. - Warn, -} - -impl ParamWarnPolicy { - /// Advances the warn policy after validation failed. - #[inline] - fn advance(&mut self) { - // Ignore `Panic` case, because it stops execution before this function gets called. - *self = Self::Never; - } - - /// Emits a warning about inaccessible system param if policy allows it. - #[inline] - fn try_warn
<P>(&self, name: &str) - where - P: SystemParam, - { - match self { - Self::Panic => panic!( - "{0} could not access system parameter {1}", - name, - disqualified::ShortName::of::<P>
() - ), - Self::Warn => { - log::warn!( - "{0} did not run because it requested inaccessible system parameter {1}", - name, - disqualified::ShortName::of::<P>

for CubicBezier<P>
{ #[inline] fn to_curve(&self) -> Result, Self::Error> { - // A derivation for this matrix can be found in "General Matrix Representations for B-splines" by Kaihuai Qin. - // - // See section 4.2 and equation 11. - let char_matrix = [ - [1., 0., 0., 0.], - [-3., 3., 0., 0.], - [3., -6., 3., 0.], - [-1., 3., -3., 1.], - ]; - let segments = self .control_points .iter() - .map(|p| CubicSegment::coefficients(*p, char_matrix)) + .map(|p| CubicSegment::new_bezier(*p)) .collect_vec(); if segments.is_empty() { @@ -151,7 +141,7 @@ pub struct CubicBezierError; /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicHermite { /// The control points of the Hermite curve. pub control_points: Vec<(P, P)>, @@ -280,7 +270,7 @@ impl CyclicCubicGenerator
<P> for CubicHermite<P>
{ /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicCardinalSpline { /// Tension pub tension: f32, @@ -442,7 +432,7 @@ impl CyclicCubicGenerator
<P> for CubicCardinalSpline<P>
{ /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicBSpline { /// The control points of the spline pub control_points: Vec
<P>
, @@ -619,7 +609,7 @@ pub enum CubicNurbsError { /// ``` #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicNurbs { /// The control points of the NURBS pub control_points: Vec
<P>
, @@ -746,10 +736,9 @@ impl CubicNurbs
<P>
{ } let last_knots_value = control_points - 3; Some( - core::iter::repeat(0.0) - .take(4) + core::iter::repeat_n(0.0, 4) .chain((1..last_knots_value).map(|v| v as f32)) - .chain(core::iter::repeat(last_knots_value as f32).take(4)) + .chain(core::iter::repeat_n(last_knots_value as f32, 4)) .collect(), ) } @@ -852,7 +841,7 @@ impl RationalGenerator
<P> for CubicNurbs<P>
{ /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct LinearSpline { /// The control points of the linear spline. pub points: Vec
<P>
, @@ -963,7 +952,11 @@ pub trait CyclicCubicGenerator { /// [`Curve`]: crate::curve::Curve #[derive(Copy, Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct CubicSegment { /// Polynomial coefficients for the segment. pub coeff: [P; 4], @@ -994,14 +987,21 @@ impl CubicSegment
<P>
{ c * 2.0 + d * 6.0 * t } + /// Creates a cubic segment from four points, representing a Bezier curve. + pub fn new_bezier(points: [P; 4]) -> Self { + // A derivation for this matrix can be found in "General Matrix Representations for B-splines" by Kaihuai Qin. + // + // See section 4.2 and equation 11. + let char_matrix = [ + [1., 0., 0., 0.], + [-3., 3., 0., 0.], + [3., -6., 3., 0.], + [-1., 3., -3., 1.], + ]; + Self::coefficients(points, char_matrix) + } + /// Calculate polynomial coefficients for the cubic curve using a characteristic matrix. - #[cfg_attr( - not(feature = "alloc"), - expect( - dead_code, - reason = "Method only used when `alloc` feature is enabled." - ) - )] #[inline] fn coefficients(p: [P; 4], char_matrix: [[f32; 4]; 4]) -> Self { let [c0, c1, c2, c3] = char_matrix; @@ -1015,6 +1015,46 @@ impl CubicSegment
<P>
{ ]; Self { coeff } } + + /// A flexible iterator used to sample curves with arbitrary functions. + /// + /// This splits the curve into `subdivisions` of evenly spaced `t` values across the + /// length of the curve from start (t = 0) to end (t = n), where `n = self.segment_count()`, + /// returning an iterator evaluating the curve with the supplied `sample_function` at each `t`. + /// + /// For `subdivisions = 2`, this will split the curve into two lines, or three points, and + /// return an iterator with 3 items, the three points, one at the start, middle, and end. + #[inline] + pub fn iter_samples<'a, 'b: 'a>( + &'b self, + subdivisions: usize, + mut sample_function: impl FnMut(&Self, f32) -> P + 'a, + ) -> impl Iterator + 'a { + self.iter_uniformly(subdivisions) + .map(move |t| sample_function(self, t)) + } + + /// An iterator that returns values of `t` uniformly spaced over `0..=subdivisions`. + #[inline] + fn iter_uniformly(&self, subdivisions: usize) -> impl Iterator { + let step = 1.0 / subdivisions as f32; + (0..=subdivisions).map(move |i| i as f32 * step) + } + + /// Iterate over the curve split into `subdivisions`, sampling the position at each step. + pub fn iter_positions(&self, subdivisions: usize) -> impl Iterator + '_ { + self.iter_samples(subdivisions, Self::position) + } + + /// Iterate over the curve split into `subdivisions`, sampling the velocity at each step. + pub fn iter_velocities(&self, subdivisions: usize) -> impl Iterator + '_ { + self.iter_samples(subdivisions, Self::velocity) + } + + /// Iterate over the curve split into `subdivisions`, sampling the acceleration at each step. + pub fn iter_accelerations(&self, subdivisions: usize) -> impl Iterator + '_ { + self.iter_samples(subdivisions, Self::acceleration) + } } /// The `CubicSegment` can be used as a 2-dimensional easing curve for animation. @@ -1030,12 +1070,9 @@ impl CubicSegment { /// This is a very common tool for UI animations that accelerate and decelerate smoothly. For /// example, the ubiquitous "ease-in-out" is defined as `(0.25, 0.1), (0.25, 1.0)`. 
#[cfg(feature = "alloc")] - pub fn new_bezier(p1: impl Into, p2: impl Into) -> Self { + pub fn new_bezier_easing(p1: impl Into, p2: impl Into) -> Self { let (p0, p3) = (Vec2::ZERO, Vec2::ONE); - let bezier = CubicBezier::new([[p0, p1.into(), p2.into(), p3]]) - .to_curve() - .unwrap(); // Succeeds because resulting curve is guaranteed to have one segment - bezier.segments[0] + Self::new_bezier([p0, p1.into(), p2.into(), p3]) } /// Maximum allowable error for iterative Bezier solve @@ -1052,7 +1089,7 @@ impl CubicSegment { /// # use bevy_math::prelude::*; /// # #[cfg(feature = "alloc")] /// # { - /// let cubic_bezier = CubicSegment::new_bezier((0.25, 0.1), (0.25, 1.0)); + /// let cubic_bezier = CubicSegment::new_bezier_easing((0.25, 0.1), (0.25, 1.0)); /// assert_eq!(cubic_bezier.ease(0.0), 0.0); /// assert_eq!(cubic_bezier.ease(1.0), 1.0); /// # } @@ -1072,7 +1109,7 @@ impl CubicSegment { /// y /// │ ● /// │ ⬈ - /// │ ⬈ + /// │ ⬈ /// │ ⬈ /// │ ⬈ /// ●─────────── x (time) @@ -1086,8 +1123,8 @@ impl CubicSegment { /// ```text /// y /// ⬈➔● - /// │ ⬈ - /// │ ↑ + /// │ ⬈ + /// │ ↑ /// │ ↑ /// │ ⬈ /// ●➔⬈───────── x (time) @@ -1140,7 +1177,7 @@ impl CubicSegment { #[derive(Clone, Debug, PartialEq)] #[cfg(feature = "alloc")] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicCurve { /// The segments comprising the curve. This must always be nonempty. segments: Vec>, @@ -1297,7 +1334,11 @@ pub trait RationalGenerator { /// [`Curve`]: crate::curve::Curve #[derive(Copy, Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct RationalSegment { /// The coefficients matrix of the cubic curve. pub coeff: [P; 4], @@ -1436,7 +1477,7 @@ impl RationalSegment
<P>
{ #[derive(Clone, Debug, PartialEq)] #[cfg(feature = "alloc")] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct RationalCurve { /// The segments comprising the curve. This must always be nonempty. segments: Vec>, @@ -1657,7 +1698,7 @@ mod tests { #[test] fn easing_simple() { // A curve similar to ease-in-out, but symmetric - let bezier = CubicSegment::new_bezier([1.0, 0.0], [0.0, 1.0]); + let bezier = CubicSegment::new_bezier_easing([1.0, 0.0], [0.0, 1.0]); assert_eq!(bezier.ease(0.0), 0.0); assert!(bezier.ease(0.2) < 0.2); // tests curve assert_eq!(bezier.ease(0.5), 0.5); // true due to symmetry @@ -1670,7 +1711,7 @@ mod tests { #[test] fn easing_overshoot() { // A curve that forms an upside-down "U", that should extend above 1.0 - let bezier = CubicSegment::new_bezier([0.0, 2.0], [1.0, 2.0]); + let bezier = CubicSegment::new_bezier_easing([0.0, 2.0], [1.0, 2.0]); assert_eq!(bezier.ease(0.0), 0.0); assert!(bezier.ease(0.5) > 1.5); assert_eq!(bezier.ease(1.0), 1.0); @@ -1680,7 +1721,7 @@ mod tests { /// the start and end positions, e.g. bouncing. #[test] fn easing_undershoot() { - let bezier = CubicSegment::new_bezier([0.0, -2.0], [1.0, -2.0]); + let bezier = CubicSegment::new_bezier_easing([0.0, -2.0], [1.0, -2.0]); assert_eq!(bezier.ease(0.0), 0.0); assert!(bezier.ease(0.5) < -0.5); assert_eq!(bezier.ease(1.0), 1.0); diff --git a/crates/bevy_math/src/curve/adaptors.rs b/crates/bevy_math/src/curve/adaptors.rs index afc34837d3..055002c9bb 100644 --- a/crates/bevy_math/src/curve/adaptors.rs +++ b/crates/bevy_math/src/curve/adaptors.rs @@ -91,7 +91,7 @@ pub struct FunctionCurve { pub(crate) domain: Interval, #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] pub(crate) f: F, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -192,7 +192,7 @@ pub struct MapCurve { pub(crate) preimage: C, #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] pub(crate) f: F, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData<(fn() -> S, fn(S) -> T)>, } @@ -289,7 +289,7 @@ pub struct ReparamCurve { pub(crate) base: C, #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] pub(crate) f: F, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -383,7 +383,7 @@ pub struct LinearReparamCurve { pub(crate) base: C, /// Invariants: This interval must always be bounded. 
pub(crate) new_domain: Interval, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -416,7 +416,7 @@ where pub struct CurveReparamCurve { pub(crate) base: C, pub(crate) reparam_curve: D, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -448,7 +448,7 @@ where )] pub struct GraphCurve { pub(crate) base: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -480,7 +480,7 @@ pub struct ZipCurve { pub(crate) domain: Interval, pub(crate) first: C, pub(crate) second: D, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData (S, T)>, } @@ -520,7 +520,7 @@ where pub struct ChainCurve { pub(crate) first: C, pub(crate) second: D, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -569,7 +569,7 @@ where )] pub struct ReverseCurve { pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -611,7 +611,7 @@ where pub struct RepeatCurve { pub(crate) domain: Interval, pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -669,7 +669,7 @@ where )] pub struct ForeverCurve { pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -723,7 +723,7 @@ where )] pub struct PingPongCurve { pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -780,7 +780,7 @@ pub struct ContinuationCurve { pub(crate) second: D, // cache the offset in the curve directly to prevent triple sampling for every sample we make pub(crate) offset: T, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } diff --git a/crates/bevy_math/src/curve/derivatives/mod.rs b/crates/bevy_math/src/curve/derivatives/mod.rs index d819443f0d..5949d356e2 100644 --- a/crates/bevy_math/src/curve/derivatives/mod.rs +++ b/crates/bevy_math/src/curve/derivatives/mod.rs @@ -37,24 +37,28 @@ use bevy_reflect::{FromReflect, Reflect}; /// derivatives to be extracted along with values. /// /// This is implemented by implementing [`SampleDerivative`]. -pub trait CurveWithDerivative: SampleDerivative +pub trait CurveWithDerivative: SampleDerivative + Sized where T: HasTangent, { /// This curve, but with its first derivative included in sampling. - fn with_derivative(self) -> impl Curve>; + /// + /// Notably, the output type is a `Curve>`. + fn with_derivative(self) -> SampleDerivativeWrapper; } /// Trait for curves that have a well-defined notion of second derivative, /// allowing for two derivatives to be extracted along with values. /// /// This is implemented by implementing [`SampleTwoDerivatives`]. 
-pub trait CurveWithTwoDerivatives: SampleTwoDerivatives +pub trait CurveWithTwoDerivatives: SampleTwoDerivatives + Sized where T: HasTangent, { /// This curve, but with its first two derivatives included in sampling. - fn with_two_derivatives(self) -> impl Curve>; + /// + /// Notably, the output type is a `Curve>`. + fn with_two_derivatives(self) -> SampleTwoDerivativesWrapper; } /// A trait for curves that can sample derivatives in addition to values. @@ -210,7 +214,7 @@ where T: HasTangent, C: SampleDerivative, { - fn with_derivative(self) -> impl Curve> { + fn with_derivative(self) -> SampleDerivativeWrapper { SampleDerivativeWrapper(self) } } @@ -220,7 +224,7 @@ where T: HasTangent, C: SampleTwoDerivatives + CurveWithDerivative, { - fn with_two_derivatives(self) -> impl Curve> { + fn with_two_derivatives(self) -> SampleTwoDerivativesWrapper { SampleTwoDerivativesWrapper(self) } } diff --git a/crates/bevy_math/src/curve/easing.rs b/crates/bevy_math/src/curve/easing.rs index a5e5692e6f..c0b452e001 100644 --- a/crates/bevy_math/src/curve/easing.rs +++ b/crates/bevy_math/src/curve/easing.rs @@ -8,6 +8,9 @@ use crate::{ Dir2, Dir3, Dir3A, Isometry2d, Isometry3d, Quat, Rot2, VectorSpace, }; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::std_traits::ReflectDefault; + use variadics_please::all_tuples_enumerated; // TODO: Think about merging `Ease` with `StableInterpolate` @@ -150,8 +153,83 @@ all_tuples_enumerated!( /// /// The resulting curve's domain is always [the unit interval]. /// +/// # Example +/// +/// Create a linear curve that interpolates between `2.0` and `4.0`. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// let c = EasingCurve::new(2.0, 4.0, EaseFunction::Linear); +/// ``` +/// +/// [`sample`] the curve at various points. This will return `None` if the parameter +/// is outside the unit interval. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let c = EasingCurve::new(2.0, 4.0, EaseFunction::Linear); +/// assert_eq!(c.sample(-1.0), None); +/// assert_eq!(c.sample(0.0), Some(2.0)); +/// assert_eq!(c.sample(0.5), Some(3.0)); +/// assert_eq!(c.sample(1.0), Some(4.0)); +/// assert_eq!(c.sample(2.0), None); +/// ``` +/// +/// [`sample_clamped`] will clamp the parameter to the unit interval, so it +/// always returns a value. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let c = EasingCurve::new(2.0, 4.0, EaseFunction::Linear); +/// assert_eq!(c.sample_clamped(-1.0), 2.0); +/// assert_eq!(c.sample_clamped(0.0), 2.0); +/// assert_eq!(c.sample_clamped(0.5), 3.0); +/// assert_eq!(c.sample_clamped(1.0), 4.0); +/// assert_eq!(c.sample_clamped(2.0), 4.0); +/// ``` +/// +/// `EasingCurve` can be used with any type that implements the [`Ease`] trait. +/// This includes many math types, like vectors and rotations. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// let c = EasingCurve::new( +/// Vec2::new(0.0, 4.0), +/// Vec2::new(2.0, 8.0), +/// EaseFunction::Linear, +/// ); +/// +/// assert_eq!(c.sample_clamped(0.5), Vec2::new(1.0, 6.0)); +/// ``` +/// +/// ``` +/// # use bevy_math::prelude::*; +/// # use approx::assert_abs_diff_eq; +/// let c = EasingCurve::new( +/// Rot2::degrees(10.0), +/// Rot2::degrees(20.0), +/// EaseFunction::Linear, +/// ); +/// +/// assert_abs_diff_eq!(c.sample_clamped(0.5), Rot2::degrees(15.0)); +/// ``` +/// +/// As a shortcut, an `EasingCurve` between `0.0` and `1.0` can be replaced by +/// [`EaseFunction`]. 
+/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let t = 0.5; +/// let f = EaseFunction::SineIn; +/// let c = EasingCurve::new(0.0, 1.0, EaseFunction::SineIn); +/// +/// assert_eq!(f.sample(t), c.sample(t)); +/// ``` +/// /// [easing function]: EaseFunction /// [the unit interval]: Interval::UNIT +/// [`sample`]: EasingCurve::sample +/// [`sample_clamped`]: EasingCurve::sample_clamped #[derive(Clone, Debug)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] @@ -194,13 +272,102 @@ where } } +/// Configuration options for the [`EaseFunction::Steps`] curves. This closely replicates the +/// [CSS step function specification]. +/// +/// [CSS step function specification]: https://developer.mozilla.org/en-US/docs/Web/CSS/easing-function/steps#description +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + feature = "bevy_reflect", + derive(bevy_reflect::Reflect), + reflect(Clone, Default, PartialEq) +)] +pub enum JumpAt { + /// Indicates that the first step happens when the animation begins. + /// + #[doc = include_str!("../../images/easefunction/StartSteps.svg")] + Start, + /// Indicates that the last step happens when the animation ends. + /// + #[doc = include_str!("../../images/easefunction/EndSteps.svg")] + #[default] + End, + /// Indicates neither early nor late jumps happen. + /// + #[doc = include_str!("../../images/easefunction/NoneSteps.svg")] + None, + /// Indicates both early and late jumps happen. + /// + #[doc = include_str!("../../images/easefunction/BothSteps.svg")] + Both, +} + +impl JumpAt { + #[inline] + pub(crate) fn eval(self, num_steps: usize, t: f32) -> f32 { + use crate::ops; + + let (a, b) = match self { + JumpAt::Start => (1.0, 0), + JumpAt::End => (0.0, 0), + JumpAt::None => (0.0, -1), + JumpAt::Both => (1.0, 1), + }; + + let current_step = ops::floor(t * num_steps as f32) + a; + let step_size = (num_steps as isize + b).max(1) as f32; + + (current_step / step_size).clamp(0.0, 1.0) + } +} + /// Curve functions over the [unit interval], commonly used for easing transitions. /// +/// `EaseFunction` can be used on its own to interpolate between `0.0` and `1.0`. +/// It can also be combined with [`EasingCurve`] to interpolate between other +/// intervals and types, including vectors and rotations. +/// +/// # Example +/// +/// [`sample`] the smoothstep function at various points. This will return `None` +/// if the parameter is outside the unit interval. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// let f = EaseFunction::SmoothStep; +/// +/// assert_eq!(f.sample(-1.0), None); +/// assert_eq!(f.sample(0.0), Some(0.0)); +/// assert_eq!(f.sample(0.5), Some(0.5)); +/// assert_eq!(f.sample(1.0), Some(1.0)); +/// assert_eq!(f.sample(2.0), None); +/// ``` +/// +/// [`sample_clamped`] will clamp the parameter to the unit interval, so it +/// always returns a value. 
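A hedged sketch (not part of the diff) of the stepped easing that `JumpAt` enables once `EaseFunction::Steps` takes the extra parameter (see the variant change further below); `stepped_opacity` is a made-up name, and the import paths assume the usual `bevy_math::curve` re-exports.

```rust
use bevy_math::curve::{Curve, EaseFunction, EasingCurve, JumpAt};

fn stepped_opacity(t: f32) -> f32 {
    // Four steps, holding each value until the end of the step (CSS `jump-end`).
    let steps = EasingCurve::new(0.0, 1.0, EaseFunction::Steps(4, JumpAt::End));
    steps.sample_clamped(t)
}
```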
+/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let f = EaseFunction::SmoothStep; +/// assert_eq!(f.sample_clamped(-1.0), 0.0); +/// assert_eq!(f.sample_clamped(0.0), 0.0); +/// assert_eq!(f.sample_clamped(0.5), 0.5); +/// assert_eq!(f.sample_clamped(1.0), 1.0); +/// assert_eq!(f.sample_clamped(2.0), 1.0); +/// ``` +/// +/// [`sample`]: EaseFunction::sample +/// [`sample_clamped`]: EaseFunction::sample_clamped /// [unit interval]: `Interval::UNIT` #[non_exhaustive] #[derive(Debug, Copy, Clone, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] +#[cfg_attr( + feature = "bevy_reflect", + derive(bevy_reflect::Reflect), + reflect(Clone, PartialEq) +)] // Note: Graphs are auto-generated via `tools/build-easefunction-graphs`. pub enum EaseFunction { /// `f(t) = t` @@ -428,10 +595,9 @@ pub enum EaseFunction { #[doc = include_str!("../../images/easefunction/BounceInOut.svg")] BounceInOut, - /// `n` steps connecting the start and the end - /// - #[doc = include_str!("../../images/easefunction/Steps.svg")] - Steps(usize), + /// `n` steps connecting the start and the end. Jumping behavior is customizable via + /// [`JumpAt`]. See [`JumpAt`] for all the options and visual examples. + Steps(usize, JumpAt), /// `f(omega,t) = 1 - (1 - t)²(2sin(omega * t) / omega + cos(omega * t))`, parametrized by `omega` /// @@ -684,8 +850,8 @@ mod easing_functions { } #[inline] - pub(crate) fn steps(num_steps: usize, t: f32) -> f32 { - ops::floor(t * num_steps as f32) / num_steps.max(1) as f32 + pub(crate) fn steps(num_steps: usize, jump_at: super::JumpAt, t: f32) -> f32 { + jump_at.eval(num_steps, t) } #[inline] @@ -734,14 +900,30 @@ impl EaseFunction { EaseFunction::BounceIn => easing_functions::bounce_in(t), EaseFunction::BounceOut => easing_functions::bounce_out(t), EaseFunction::BounceInOut => easing_functions::bounce_in_out(t), - EaseFunction::Steps(num_steps) => easing_functions::steps(*num_steps, t), + EaseFunction::Steps(num_steps, jump_at) => { + easing_functions::steps(*num_steps, *jump_at, t) + } EaseFunction::Elastic(omega) => easing_functions::elastic(*omega, t), } } } +impl Curve for EaseFunction { + #[inline] + fn domain(&self) -> Interval { + Interval::UNIT + } + + #[inline] + fn sample_unchecked(&self, t: f32) -> f32 { + self.eval(t) + } +} + #[cfg(test)] +#[cfg(feature = "approx")] mod tests { + use crate::{Vec2, Vec3, Vec3A}; use approx::assert_abs_diff_eq; @@ -903,4 +1085,118 @@ mod tests { ); }); } + + #[test] + fn jump_at_start() { + let jump_at = JumpAt::Start; + let num_steps = 4; + + [ + (0.0, 0.25), + (0.249, 0.25), + (0.25, 0.5), + (0.499, 0.5), + (0.5, 0.75), + (0.749, 0.75), + (0.75, 1.0), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn jump_at_end() { + let jump_at = JumpAt::End; + let num_steps = 4; + + [ + (0.0, 0.0), + (0.249, 0.0), + (0.25, 0.25), + (0.499, 0.25), + (0.5, 0.5), + (0.749, 0.5), + (0.75, 0.75), + (0.999, 0.75), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn jump_at_none() { + let jump_at = JumpAt::None; + let num_steps = 5; + + [ + (0.0, 0.0), + (0.199, 0.0), + (0.2, 0.25), + (0.399, 0.25), + (0.4, 0.5), + (0.599, 0.5), + (0.6, 0.75), + (0.799, 0.75), + (0.8, 1.0), + (0.999, 1.0), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + 
assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn jump_at_both() { + let jump_at = JumpAt::Both; + let num_steps = 4; + + [ + (0.0, 0.2), + (0.249, 0.2), + (0.25, 0.4), + (0.499, 0.4), + (0.5, 0.6), + (0.749, 0.6), + (0.75, 0.8), + (0.999, 0.8), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn ease_function_curve() { + // Test that using `EaseFunction` directly is equivalent to `EasingCurve::new(0.0, 1.0, ...)`. + + let f = EaseFunction::SmoothStep; + let c = EasingCurve::new(0.0, 1.0, EaseFunction::SmoothStep); + + assert_eq!(f.domain(), c.domain()); + + [ + -1.0, + 0.0, + 0.5, + 1.0, + 2.0, + -f32::MIN_POSITIVE, + 1.0 + f32::EPSILON, + ] + .into_iter() + .for_each(|t| { + assert_eq!(f.sample(t), c.sample(t)); + assert_eq!(f.sample_clamped(t), c.sample_clamped(t)); + }); + } } diff --git a/crates/bevy_math/src/curve/interval.rs b/crates/bevy_math/src/curve/interval.rs index 007e523c95..03ffc0c486 100644 --- a/crates/bevy_math/src/curve/interval.rs +++ b/crates/bevy_math/src/curve/interval.rs @@ -18,7 +18,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// will always have some nonempty interior. #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) diff --git a/crates/bevy_math/src/curve/mod.rs b/crates/bevy_math/src/curve/mod.rs index 4facf60f5a..94e7b0151e 100644 --- a/crates/bevy_math/src/curve/mod.rs +++ b/crates/bevy_math/src/curve/mod.rs @@ -1061,7 +1061,7 @@ mod tests { let start = Vec2::ZERO; let end = Vec2::new(1.0, 2.0); - let curve = EasingCurve::new(start, end, EaseFunction::Steps(4)); + let curve = EasingCurve::new(start, end, EaseFunction::Steps(4, JumpAt::End)); [ (0.0, start), (0.249, start), diff --git a/crates/bevy_math/src/curve/sample_curves.rs b/crates/bevy_math/src/curve/sample_curves.rs index 681500328b..f0fa928abb 100644 --- a/crates/bevy_math/src/curve/sample_curves.rs +++ b/crates/bevy_math/src/curve/sample_curves.rs @@ -4,6 +4,7 @@ use super::cores::{EvenCore, EvenCoreError, UnevenCore, UnevenCoreError}; use super::{Curve, Interval}; use crate::StableInterpolate; +#[cfg(feature = "bevy_reflect")] use alloc::format; use core::any::type_name; use core::fmt::{self, Debug}; diff --git a/crates/bevy_math/src/direction.rs b/crates/bevy_math/src/direction.rs index 3e32d782ba..45138f20e2 100644 --- a/crates/bevy_math/src/direction.rs +++ b/crates/bevy_math/src/direction.rs @@ -15,14 +15,19 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg(all(debug_assertions, feature = "std"))] use std::eprintln; +use thiserror::Error; + /// An error indicating that a direction is invalid. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Error)] pub enum InvalidDirectionError { /// The length of the direction vector is zero or very close to zero. + #[error("The length of the direction vector is zero or very close to zero")] Zero, /// The length of the direction vector is `std::f32::INFINITY`. + #[error("The length of the direction vector is `std::f32::INFINITY`")] Infinite, /// The length of the direction vector is `NaN`. 
+ #[error("The length of the direction vector is `NaN`")] NaN, } @@ -41,15 +46,6 @@ impl InvalidDirectionError { } } -impl core::fmt::Display for InvalidDirectionError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "Direction can not be zero (or very close to zero), or non-finite." - ) - } -} - /// Checks that a vector with the given squared length is normalized. /// /// Warns for small error with a length threshold of approximately `1e-4`, @@ -73,17 +69,24 @@ fn assert_is_normalized(message: &str, length_squared: f32) { } else if length_error_squared > 2e-4 { // Length error is approximately 1e-4 or more. #[cfg(feature = "std")] - eprintln!( - "Warning: {message} The length is {}.", - ops::sqrt(length_squared) - ); + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] + { + eprintln!( + "Warning: {message} The length is {}.", + ops::sqrt(length_squared) + ); + } } } /// A normalized vector pointing in a direction in 2D space #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -198,9 +201,11 @@ impl Dir2 { /// let dir2 = Dir2::Y; /// /// let result1 = dir1.slerp(dir2, 1.0 / 3.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result1, Dir2::from_xy(0.75_f32.sqrt(), 0.5).unwrap()); /// /// let result2 = dir1.slerp(dir2, 0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result2, Dir2::from_xy(0.5_f32.sqrt(), 0.5_f32.sqrt()).unwrap()); /// ``` #[inline] @@ -355,7 +360,11 @@ impl approx::UlpsEq for Dir2 { /// A normalized vector pointing in a direction in 3D space #[derive(Clone, Copy, Debug, PartialEq, Into)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -457,6 +466,7 @@ impl Dir3 { /// let dir2 = Dir3::Y; /// /// let result1 = dir1.slerp(dir2, 1.0 / 3.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!( /// result1, /// Dir3::from_xyz(0.75_f32.sqrt(), 0.5, 0.0).unwrap(), @@ -464,6 +474,7 @@ impl Dir3 { /// ); /// /// let result2 = dir1.slerp(dir2, 0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result2, Dir3::from_xyz(0.5_f32.sqrt(), 0.5_f32.sqrt(), 0.0).unwrap()); /// ``` #[inline] @@ -614,7 +625,11 @@ impl approx::UlpsEq for Dir3 { /// This may or may not be faster than [`Dir3`]: make sure to benchmark! 
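A brief sketch (not part of the diff) of how the error type reads now that it derives `Error` via `thiserror`, assuming the existing `Dir2::new` constructor; `direction_from_input` and `report` are illustrative names.

```rust
use bevy_math::prelude::*;
use bevy_math::InvalidDirectionError;

fn direction_from_input(input: Vec2) -> Result<Dir2, InvalidDirectionError> {
    // Fails with `InvalidDirectionError::Zero` for a zero-length vector.
    Dir2::new(input)
}

fn report() {
    match direction_from_input(Vec2::ZERO) {
        Ok(dir) => println!("moving towards {dir:?}"),
        // Uses the per-variant `Display` messages added above.
        Err(err) => println!("invalid direction: {err}"),
    }
}
```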
#[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -716,6 +731,7 @@ impl Dir3A { /// let dir2 = Dir3A::Y; /// /// let result1 = dir1.slerp(dir2, 1.0 / 3.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!( /// result1, /// Dir3A::from_xyz(0.75_f32.sqrt(), 0.5, 0.0).unwrap(), @@ -723,6 +739,7 @@ impl Dir3A { /// ); /// /// let result2 = dir1.slerp(dir2, 0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result2, Dir3A::from_xyz(0.5_f32.sqrt(), 0.5_f32.sqrt(), 0.0).unwrap()); /// ``` #[inline] @@ -850,6 +867,7 @@ impl approx::UlpsEq for Dir3A { } #[cfg(test)] +#[cfg(feature = "approx")] mod tests { use crate::ops; diff --git a/crates/bevy_math/src/float_ord.rs b/crates/bevy_math/src/float_ord.rs index 2369b0f6dc..e69c6b35e0 100644 --- a/crates/bevy_math/src/float_ord.rs +++ b/crates/bevy_math/src/float_ord.rs @@ -20,7 +20,7 @@ use bevy_reflect::Reflect; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Hash) + reflect(Debug, PartialEq, Hash, Clone) )] pub struct FloatOrd(pub f32); diff --git a/crates/bevy_math/src/isometry.rs b/crates/bevy_math/src/isometry.rs index e01a8cd713..a221615b0a 100644 --- a/crates/bevy_math/src/isometry.rs +++ b/crates/bevy_math/src/isometry.rs @@ -88,7 +88,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -366,7 +366,7 @@ impl UlpsEq for Isometry2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -589,6 +589,7 @@ impl UlpsEq for Isometry3d { } #[cfg(test)] +#[cfg(feature = "approx")] mod tests { use super::*; use crate::{vec2, vec3, vec3a}; diff --git a/crates/bevy_math/src/ops.rs b/crates/bevy_math/src/ops.rs index e9d27ac54a..3a7765939d 100644 --- a/crates/bevy_math/src/ops.rs +++ b/crates/bevy_math/src/ops.rs @@ -19,7 +19,7 @@ // - `f32::gamma` // - `f32::ln_gamma` -#[cfg(not(feature = "libm"))] +#[cfg(all(not(feature = "libm"), feature = "std"))] #[expect( clippy::disallowed_methods, reason = "Many of the disallowed methods are disallowed to force code to use the feature-conditional re-exports from this module, but this module itself is exempt from that rule." @@ -233,7 +233,7 @@ mod std_ops { } } -#[cfg(feature = "libm")] +#[cfg(any(feature = "libm", all(feature = "nostd-libm", not(feature = "std"))))] mod libm_ops { /// Raises a number to a floating point power. @@ -448,7 +448,7 @@ mod libm_ops { } } -#[cfg(all(feature = "libm", not(feature = "std")))] +#[cfg(all(any(feature = "libm", feature = "nostd-libm"), not(feature = "std")))] mod libm_ops_for_no_std { //! Provides standardized names for [`f32`] operations which may not be //! supported on `no_std` platforms. @@ -510,6 +510,14 @@ mod libm_ops_for_no_std { libm::floorf(x) } + /// Returns the smallest integer greater than or equal to `x`. + /// + /// Precision is specified when the `libm` feature is enabled. 
+ #[inline(always)] + pub fn ceil(x: f32) -> f32 { + libm::ceilf(x) + } + /// Returns the fractional part of `x`. /// /// This function always returns the precise result. @@ -581,6 +589,14 @@ mod std_ops_for_no_std { f32::floor(x) } + /// Returns the smallest integer greater than or equal to `x`. + /// + /// This function always returns the precise result. + #[inline(always)] + pub fn ceil(x: f32) -> f32 { + f32::ceil(x) + } + /// Returns the fractional part of `x`. /// /// This function always returns the precise result. @@ -590,20 +606,24 @@ mod std_ops_for_no_std { } } -#[cfg(feature = "libm")] +#[cfg(any(feature = "libm", all(feature = "nostd-libm", not(feature = "std"))))] pub use libm_ops::*; -#[cfg(not(feature = "libm"))] +#[cfg(all(not(feature = "libm"), feature = "std"))] pub use std_ops::*; #[cfg(feature = "std")] pub use std_ops_for_no_std::*; -#[cfg(all(feature = "libm", not(feature = "std")))] +#[cfg(all(any(feature = "libm", feature = "nostd-libm"), not(feature = "std")))] pub use libm_ops_for_no_std::*; -#[cfg(all(not(feature = "libm"), not(feature = "std")))] -compile_error!("Either the `libm` feature or the `std` feature must be enabled."); +#[cfg(all( + not(feature = "libm"), + not(feature = "std"), + not(feature = "nostd-libm") +))] +compile_error!("Either the `libm`, `std`, or `nostd-libm` feature must be enabled."); /// This extension trait covers shortfall in determinacy from the lack of a `libm` counterpart /// to `f32::powi`. Use this for the common small exponents. diff --git a/crates/bevy_math/src/primitives/dim2.rs b/crates/bevy_math/src/primitives/dim2.rs index c336edac45..613345bcd8 100644 --- a/crates/bevy_math/src/primitives/dim2.rs +++ b/crates/bevy_math/src/primitives/dim2.rs @@ -5,7 +5,7 @@ use thiserror::Error; use super::{Measured2d, Primitive2d, WindingOrder}; use crate::{ ops::{self, FloatPow}, - Dir2, Rot2, Vec2, + Dir2, InvalidDirectionError, Isometry2d, Ray2d, Rot2, Vec2, }; #[cfg(feature = "alloc")] @@ -25,7 +25,7 @@ use alloc::{boxed::Box, vec::Vec}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -112,7 +112,7 @@ impl Measured2d for Circle { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -279,7 +279,7 @@ impl Arc2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -422,7 +422,7 @@ impl CircularSector { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -776,7 +776,7 @@ mod arc_tests { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -926,7 +926,7 @@ impl Measured2d for Ellipse { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1025,7 +1025,7 @@ impl Measured2d for Annulus { #[cfg_attr( feature = 
"bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1161,7 +1161,7 @@ impl Measured2d for Rhombus { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1199,7 +1199,11 @@ impl Plane2d { /// For a finite line: [`Segment2d`] #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1211,10 +1215,14 @@ pub struct Line2d { } impl Primitive2d for Line2d {} -/// A segment of a line going through the origin along a direction in 2D space. +/// A line segment defined by two endpoints in 2D space. #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1227,7 +1235,7 @@ pub struct Segment2d { impl Primitive2d for Segment2d {} impl Segment2d { - /// Create a new `Segment2d` from its endpoints + /// Create a new `Segment2d` from its endpoints. #[inline(always)] pub const fn new(point1: Vec2, point2: Vec2) -> Self { Self { @@ -1235,62 +1243,194 @@ impl Segment2d { } } - /// Create a new `Segment2d` from its endpoints and compute its geometric center - /// - /// # Panics - /// - /// Panics if `point1 == point2` + /// Create a new `Segment2d` from its endpoints and compute its geometric center. #[inline(always)] #[deprecated(since = "0.16.0", note = "Use the `new` constructor instead")] pub fn from_points(point1: Vec2, point2: Vec2) -> (Self, Vec2) { (Self::new(point1, point2), (point1 + point2) / 2.) } - /// Create a new `Segment2d` at the origin from a `direction` and `length` + /// Create a new `Segment2d` centered at the origin with the given direction and length. + /// + /// The endpoints will be at `-direction * length / 2.0` and `direction * length / 2.0`. #[inline(always)] - pub fn from_direction_and_length(direction: Dir2, length: f32) -> Segment2d { - let half_length = length / 2.; - Self::new(direction * -half_length, direction * half_length) + pub fn from_direction_and_length(direction: Dir2, length: f32) -> Self { + let endpoint = 0.5 * length * direction; + Self { + vertices: [-endpoint, endpoint], + } } - /// Get the position of the first point on the line segment + /// Create a new `Segment2d` centered at the origin from a vector representing + /// the direction and length of the line segment. + /// + /// The endpoints will be at `-scaled_direction / 2.0` and `scaled_direction / 2.0`. + #[inline(always)] + pub fn from_scaled_direction(scaled_direction: Vec2) -> Self { + let endpoint = 0.5 * scaled_direction; + Self { + vertices: [-endpoint, endpoint], + } + } + + /// Create a new `Segment2d` starting from the origin of the given `ray`, + /// going in the direction of the ray for the given `length`. 
+ /// + /// The endpoints will be at `ray.origin` and `ray.origin + length * ray.direction`. + #[inline(always)] + pub fn from_ray_and_length(ray: Ray2d, length: f32) -> Self { + Self { + vertices: [ray.origin, ray.get_point(length)], + } + } + + /// Get the position of the first endpoint of the line segment. #[inline(always)] pub fn point1(&self) -> Vec2 { self.vertices[0] } - /// Get the position of the second point on the line segment + /// Get the position of the second endpoint of the line segment. #[inline(always)] pub fn point2(&self) -> Vec2 { self.vertices[1] } - /// Get the segment's center + /// Compute the midpoint between the two endpoints of the line segment. #[inline(always)] #[doc(alias = "midpoint")] pub fn center(&self) -> Vec2 { - (self.point1() + self.point2()) / 2. + self.point1().midpoint(self.point2()) } - /// Get the segment's length + /// Compute the length of the line segment. #[inline(always)] pub fn length(&self) -> f32 { self.point1().distance(self.point2()) } - /// Get the segment translated by the given vector + /// Compute the squared length of the line segment. + #[inline(always)] + pub fn length_squared(&self) -> f32 { + self.point1().distance_squared(self.point2()) + } + + /// Compute the normalized direction pointing from the first endpoint to the second endpoint. + /// + /// For the non-panicking version, see [`Segment2d::try_direction`]. + /// + /// # Panics + /// + /// Panics if a valid direction could not be computed, for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn direction(&self) -> Dir2 { + self.try_direction().unwrap_or_else(|err| { + panic!("Failed to compute the direction of a line segment: {err}") + }) + } + + /// Try to compute the normalized direction pointing from the first endpoint to the second endpoint. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid direction could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_direction(&self) -> Result { + Dir2::new(self.scaled_direction()) + } + + /// Compute the vector from the first endpoint to the second endpoint. + #[inline(always)] + pub fn scaled_direction(&self) -> Vec2 { + self.point2() - self.point1() + } + + /// Compute the normalized counterclockwise normal on the left-hand side of the line segment. + /// + /// For the non-panicking version, see [`Segment2d::try_left_normal`]. + /// + /// # Panics + /// + /// Panics if a valid normal could not be computed, for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn left_normal(&self) -> Dir2 { + self.try_left_normal().unwrap_or_else(|err| { + panic!("Failed to compute the left-hand side normal of a line segment: {err}") + }) + } + + /// Try to compute the normalized counterclockwise normal on the left-hand side of the line segment. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid normal could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_left_normal(&self) -> Result { + Dir2::new(self.scaled_left_normal()) + } + + /// Compute the non-normalized counterclockwise normal on the left-hand side of the line segment. + /// + /// The length of the normal is the distance between the endpoints. 
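The accessors above are easiest to read with concrete numbers; a short sketch (values chosen so the comparisons are exact in `f32`):

```rust
use bevy_math::{primitives::Segment2d, Vec2};

fn main() {
    let segment = Segment2d::new(Vec2::ZERO, Vec2::new(3.0, 4.0));

    assert_eq!(segment.center(), Vec2::new(1.5, 2.0)); // midpoint of the endpoints
    assert_eq!(segment.length(), 5.0);                 // 3-4-5 triangle
    assert_eq!(segment.length_squared(), 25.0);        // no square root needed
    assert_eq!(segment.scaled_direction(), Vec2::new(3.0, 4.0));

    // `direction()` normalizes and panics on degenerate input;
    // `try_direction()` returns a `Result` instead.
    let dir = segment.direction();
    println!("{dir:?}"); // roughly (0.6, 0.8)
}
```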
+ #[inline(always)] + pub fn scaled_left_normal(&self) -> Vec2 { + let scaled_direction = self.scaled_direction(); + Vec2::new(-scaled_direction.y, scaled_direction.x) + } + + /// Compute the normalized clockwise normal on the right-hand side of the line segment. + /// + /// For the non-panicking version, see [`Segment2d::try_right_normal`]. + /// + /// # Panics + /// + /// Panics if a valid normal could not be computed, for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn right_normal(&self) -> Dir2 { + self.try_right_normal().unwrap_or_else(|err| { + panic!("Failed to compute the right-hand side normal of a line segment: {err}") + }) + } + + /// Try to compute the normalized clockwise normal on the right-hand side of the line segment. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid normal could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_right_normal(&self) -> Result { + Dir2::new(self.scaled_right_normal()) + } + + /// Compute the non-normalized clockwise normal on the right-hand side of the line segment. + /// + /// The length of the normal is the distance between the endpoints. + #[inline(always)] + pub fn scaled_right_normal(&self) -> Vec2 { + let scaled_direction = self.scaled_direction(); + Vec2::new(scaled_direction.y, -scaled_direction.x) + } + + /// Compute the segment transformed by the given [`Isometry2d`]. + #[inline(always)] + pub fn transformed(&self, isometry: impl Into) -> Self { + let isometry: Isometry2d = isometry.into(); + Self::new( + isometry.transform_point(self.point1()), + isometry.transform_point(self.point2()), + ) + } + + /// Compute the segment translated by the given vector. #[inline(always)] pub fn translated(&self, translation: Vec2) -> Segment2d { Self::new(self.point1() + translation, self.point2() + translation) } - /// Compute a new segment, based on the original segment rotated around the origin + /// Compute the segment rotated around the origin by the given rotation. #[inline(always)] pub fn rotated(&self, rotation: Rot2) -> Segment2d { Segment2d::new(rotation * self.point1(), rotation * self.point2()) } - /// Compute a new segment, based on the original segment rotated around a given point + /// Compute the segment rotated around the given point by the given rotation. #[inline(always)] pub fn rotated_around(&self, rotation: Rot2, point: Vec2) -> Segment2d { // We offset our segment so that our segment is rotated as if from the origin, then we can apply the offset back @@ -1299,28 +1439,57 @@ impl Segment2d { rotated.translated(point) } - /// Compute a new segment, based on the original segment rotated around its center + /// Compute the segment rotated around its own center. #[inline(always)] pub fn rotated_around_center(&self, rotation: Rot2) -> Segment2d { self.rotated_around(rotation, self.center()) } - /// Get the segment with its center at the origin + /// Compute the segment with its center at the origin, keeping the same direction and length. #[inline(always)] pub fn centered(&self) -> Segment2d { let center = self.center(); self.translated(-center) } - /// Get the segment with a new length + /// Compute the segment with a new length, keeping the same direction and center. 
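The normal helpers just rotate the segment's direction a quarter turn one way or the other; a sketch, assuming `Dir2` implements `PartialEq` so it can be compared against the axis constants:

```rust
use bevy_math::{primitives::Segment2d, Dir2, Vec2};

fn main() {
    let segment = Segment2d::new(Vec2::ZERO, Vec2::new(2.0, 0.0));

    // Counterclockwise (left) and clockwise (right) perpendiculars.
    assert_eq!(segment.left_normal(), Dir2::Y);
    assert_eq!(segment.right_normal(), Dir2::NEG_Y);

    // The scaled variants skip normalization and keep the segment's length.
    assert_eq!(segment.scaled_left_normal(), Vec2::new(0.0, 2.0));
}
```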
#[inline(always)] pub fn resized(&self, length: f32) -> Segment2d { let offset_from_origin = self.center(); - let centered = self.centered(); + let centered = self.translated(-offset_from_origin); let ratio = length / self.length(); let segment = Segment2d::new(centered.point1() * ratio, centered.point2() * ratio); segment.translated(offset_from_origin) } + + /// Reverses the direction of the line segment by swapping the endpoints. + #[inline(always)] + pub fn reverse(&mut self) { + let [point1, point2] = &mut self.vertices; + core::mem::swap(point1, point2); + } + + /// Returns the line segment with its direction reversed by swapping the endpoints. + #[inline(always)] + #[must_use] + pub fn reversed(mut self) -> Self { + self.reverse(); + self + } +} + +impl From<[Vec2; 2]> for Segment2d { + #[inline(always)] + fn from(vertices: [Vec2; 2]) -> Self { + Self { vertices } + } +} + +impl From<(Vec2, Vec2)> for Segment2d { + #[inline(always)] + fn from((point1, point2): (Vec2, Vec2)) -> Self { + Self::new(point1, point2) + } } /// A series of connected line segments in 2D space. @@ -1328,7 +1497,11 @@ impl Segment2d { /// For a version without generics: [`BoxedPolyline2d`] #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1397,7 +1570,7 @@ impl BoxedPolyline2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1568,7 +1741,7 @@ impl Measured2d for Triangle2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1658,7 +1831,11 @@ impl Measured2d for Rectangle { /// For a version without generics: [`BoxedPolygon`] #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1708,7 +1885,11 @@ impl From> for Polygon { /// A convex polygon with `N` vertices. 
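The `From` conversions and the `reverse`/`reversed` pair added above round out the endpoint-based API; an illustrative snippet:

```rust
use bevy_math::{primitives::Segment2d, Vec2};

fn main() {
    // Build a segment from an endpoint array or tuple.
    let from_array: Segment2d = [Vec2::ZERO, Vec2::X].into();
    let from_tuple: Segment2d = (Vec2::ZERO, Vec2::X).into();
    assert_eq!(from_array, from_tuple);

    // `reversed` returns a copy with swapped endpoints; `reverse` swaps in place.
    let mut segment = from_array;
    assert_eq!(segment.reversed().point1(), Vec2::X);
    segment.reverse();
    assert_eq!(segment.point2(), Vec2::ZERO);
}
```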
#[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1827,7 +2008,7 @@ impl BoxedPolygon { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1973,7 +2154,7 @@ impl Measured2d for RegularPolygon { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/primitives/dim3.rs b/crates/bevy_math/src/primitives/dim3.rs index a828fa8247..a36db0ade5 100644 --- a/crates/bevy_math/src/primitives/dim3.rs +++ b/crates/bevy_math/src/primitives/dim3.rs @@ -3,7 +3,7 @@ use core::f32::consts::{FRAC_PI_3, PI}; use super::{Circle, Measured2d, Measured3d, Primitive2d, Primitive3d}; use crate::{ ops::{self, FloatPow}, - Dir3, InvalidDirectionError, Isometry3d, Mat3, Vec2, Vec3, + Dir3, InvalidDirectionError, Isometry3d, Mat3, Ray3d, Vec2, Vec3, }; #[cfg(feature = "bevy_reflect")] @@ -21,7 +21,7 @@ use alloc::{boxed::Box, vec::Vec}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -93,7 +93,7 @@ impl Measured3d for Sphere { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -165,7 +165,7 @@ impl Plane3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -338,7 +338,11 @@ impl InfinitePlane3d { /// For a finite line: [`Segment3d`] #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -349,15 +353,19 @@ pub struct Line3d { } impl Primitive3d for Line3d {} -/// A segment of a line going through the origin along a direction in 3D space. -#[doc(alias = "LineSegment3d")] +/// A line segment defined by two endpoints in 3D space. #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] +#[doc(alias = "LineSegment3d")] pub struct Segment3d { /// The endpoints of the line segment. 
pub vertices: [Vec3; 2], @@ -365,7 +373,7 @@ pub struct Segment3d { impl Primitive3d for Segment3d {} impl Segment3d { - /// Create a new `Segment3d` from its endpoints + /// Create a new `Segment3d` from its endpoints. #[inline(always)] pub const fn new(point1: Vec3, point2: Vec3) -> Self { Self { @@ -373,65 +381,130 @@ impl Segment3d { } } - /// Create a new `Segment3d` from a direction and full length of the segment - #[inline(always)] - pub fn from_direction_and_length(direction: Dir3, length: f32) -> Self { - let half_length = length / 2.; - Self::new(direction * -half_length, direction * half_length) - } - - /// Create a new `Segment3d` from its endpoints and compute its geometric center - /// - /// # Panics - /// - /// Panics if `point1 == point2` + /// Create a new `Segment3d` from its endpoints and compute its geometric center. #[inline(always)] #[deprecated(since = "0.16.0", note = "Use the `new` constructor instead")] pub fn from_points(point1: Vec3, point2: Vec3) -> (Self, Vec3) { (Self::new(point1, point2), (point1 + point2) / 2.) } - /// Get the position of the first point on the line segment + /// Create a new `Segment3d` centered at the origin with the given direction and length. + /// + /// The endpoints will be at `-direction * length / 2.0` and `direction * length / 2.0`. + #[inline(always)] + pub fn from_direction_and_length(direction: Dir3, length: f32) -> Self { + let endpoint = 0.5 * length * direction; + Self { + vertices: [-endpoint, endpoint], + } + } + + /// Create a new `Segment3d` centered at the origin from a vector representing + /// the direction and length of the line segment. + /// + /// The endpoints will be at `-scaled_direction / 2.0` and `scaled_direction / 2.0`. + #[inline(always)] + pub fn from_scaled_direction(scaled_direction: Vec3) -> Self { + let endpoint = 0.5 * scaled_direction; + Self { + vertices: [-endpoint, endpoint], + } + } + + /// Create a new `Segment3d` starting from the origin of the given `ray`, + /// going in the direction of the ray for the given `length`. + /// + /// The endpoints will be at `ray.origin` and `ray.origin + length * ray.direction`. + #[inline(always)] + pub fn from_ray_and_length(ray: Ray3d, length: f32) -> Self { + Self { + vertices: [ray.origin, ray.get_point(length)], + } + } + + /// Get the position of the first endpoint of the line segment. #[inline(always)] pub fn point1(&self) -> Vec3 { self.vertices[0] } - /// Get the position of the second point on the line segment + /// Get the position of the second endpoint of the line segment. #[inline(always)] pub fn point2(&self) -> Vec3 { self.vertices[1] } - /// Get the center of the segment + /// Compute the midpoint between the two endpoints of the line segment. #[inline(always)] #[doc(alias = "midpoint")] pub fn center(&self) -> Vec3 { - (self.point1() + self.point2()) / 2. + self.point1().midpoint(self.point2()) } - /// Get the length of the segment + /// Compute the length of the line segment. #[inline(always)] pub fn length(&self) -> f32 { self.point1().distance(self.point2()) } - /// Get the segment translated by a vector + /// Compute the squared length of the line segment. + #[inline(always)] + pub fn length_squared(&self) -> f32 { + self.point1().distance_squared(self.point2()) + } + + /// Compute the normalized direction pointing from the first endpoint to the second endpoint. + /// + /// For the non-panicking version, see [`Segment3d::try_direction`]. 
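As in 2D, `Segment3d` splits the panicking `direction` from a fallible `try_direction`. A sketch of the fallible path on a degenerate segment with coincident endpoints:

```rust
use bevy_math::{primitives::Segment3d, Vec3};

fn main() {
    let degenerate = Segment3d::new(Vec3::splat(1.0), Vec3::splat(1.0));

    // `direction()` would panic here; `try_direction()` surfaces the error instead.
    match degenerate.try_direction() {
        Ok(dir) => println!("direction: {dir:?}"),
        Err(err) => println!("could not compute a direction: {err}"),
    }
}
```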
+ /// + /// # Panics + /// + /// Panics if a valid direction could not be computed, for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn direction(&self) -> Dir3 { + self.try_direction().unwrap_or_else(|err| { + panic!("Failed to compute the direction of a line segment: {err}") + }) + } + + /// Try to compute the normalized direction pointing from the first endpoint to the second endpoint. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid direction could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_direction(&self) -> Result { + Dir3::new(self.scaled_direction()) + } + + /// Compute the vector from the first endpoint to the second endpoint. + #[inline(always)] + pub fn scaled_direction(&self) -> Vec3 { + self.point2() - self.point1() + } + + /// Compute the segment transformed by the given [`Isometry3d`]. + #[inline(always)] + pub fn transformed(&self, isometry: impl Into) -> Self { + let isometry: Isometry3d = isometry.into(); + Self::new( + isometry.transform_point(self.point1()).into(), + isometry.transform_point(self.point2()).into(), + ) + } + + /// Compute the segment translated by the given vector. #[inline(always)] pub fn translated(&self, translation: Vec3) -> Segment3d { Self::new(self.point1() + translation, self.point2() + translation) } - /// Compute a new segment, based on the original segment rotated around the origin + /// Compute the segment rotated around the origin by the given rotation. #[inline(always)] pub fn rotated(&self, rotation: Quat) -> Segment3d { - Segment3d::new( - rotation.mul_vec3(self.point1()), - rotation.mul_vec3(self.point2()), - ) + Segment3d::new(rotation * self.point1(), rotation * self.point2()) } - /// Compute a new segment, based on the original segment rotated around a given point + /// Compute the segment rotated around the given point by the given rotation. #[inline(always)] pub fn rotated_around(&self, rotation: Quat, point: Vec3) -> Segment3d { // We offset our segment so that our segment is rotated as if from the origin, then we can apply the offset back @@ -440,28 +513,57 @@ impl Segment3d { rotated.translated(point) } - /// Compute a new segment, based on the original segment rotated around its center + /// Compute the segment rotated around its own center. #[inline(always)] pub fn rotated_around_center(&self, rotation: Quat) -> Segment3d { self.rotated_around(rotation, self.center()) } - /// Get the segment offset so that it's center is at the origin + /// Compute the segment with its center at the origin, keeping the same direction and length. #[inline(always)] pub fn centered(&self) -> Segment3d { let center = self.center(); self.translated(-center) } - /// Get the segment with a new length + /// Compute the segment with a new length, keeping the same direction and center. #[inline(always)] pub fn resized(&self, length: f32) -> Segment3d { let offset_from_origin = self.center(); - let centered = self.centered(); + let centered = self.translated(-offset_from_origin); let ratio = length / self.length(); let segment = Segment3d::new(centered.point1() * ratio, centered.point2() * ratio); segment.translated(offset_from_origin) } + + /// Reverses the direction of the line segment by swapping the endpoints. 
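`transformed` takes anything convertible into an isometry, and `rotated` now multiplies the `Quat` against each endpoint directly. A sketch, assuming an `Isometry3d::new(translation, rotation)` constructor (not shown in this diff):

```rust
use bevy_math::{primitives::Segment3d, Isometry3d, Quat, Vec3};
use core::f32::consts::FRAC_PI_2;

fn main() {
    let segment = Segment3d::new(Vec3::ZERO, Vec3::X);

    // Rotate 90 degrees around +Z: the segment now points along +Y.
    let rotated = segment.rotated(Quat::from_rotation_z(FRAC_PI_2));

    // Apply a full rigid transform (rotation, then translation) in one call.
    let iso = Isometry3d::new(Vec3::new(0.0, 0.0, 5.0), Quat::from_rotation_z(FRAC_PI_2));
    let moved = segment.transformed(iso);

    println!("{rotated:?}\n{moved:?}");
}
```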
+ #[inline(always)] + pub fn reverse(&mut self) { + let [point1, point2] = &mut self.vertices; + core::mem::swap(point1, point2); + } + + /// Returns the line segment with its direction reversed by swapping the endpoints. + #[inline(always)] + #[must_use] + pub fn reversed(mut self) -> Self { + self.reverse(); + self + } +} + +impl From<[Vec3; 2]> for Segment3d { + #[inline(always)] + fn from(vertices: [Vec3; 2]) -> Self { + Self { vertices } + } +} + +impl From<(Vec3, Vec3)> for Segment3d { + #[inline(always)] + fn from((point1, point2): (Vec3, Vec3)) -> Self { + Self::new(point1, point2) + } } /// A series of connected line segments in 3D space. @@ -469,7 +571,11 @@ impl Segment3d { /// For a version without generics: [`BoxedPolyline3d`] #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -539,7 +645,7 @@ impl BoxedPolyline3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -631,7 +737,7 @@ impl Measured3d for Cuboid { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -709,7 +815,7 @@ impl Measured3d for Cylinder { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -779,7 +885,7 @@ impl Measured3d for Capsule3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -861,7 +967,7 @@ impl Measured3d for Cone { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -913,7 +1019,7 @@ pub enum TorusKind { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1024,7 +1130,7 @@ impl Measured3d for Torus { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1217,7 +1323,7 @@ impl Measured2d for Triangle3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/ray.rs b/crates/bevy_math/src/ray.rs index 273ed61fa4..5fe9c3740a 100644 --- a/crates/bevy_math/src/ray.rs +++ b/crates/bevy_math/src/ray.rs @@ -12,7 +12,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// An infinite half-line starting at `origin` and going in `direction` in 2D space. 
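A large share of the remaining hunks just add `Clone` to existing `#[reflect(...)]` attributes; the intent is to let the reflection machinery use the type's real `Clone` impl when cloning reflected values. On a user-defined type the pattern looks roughly like this (illustrative type, not from the diff):

```rust
use bevy_reflect::Reflect;

#[derive(Clone, Debug, PartialEq, Default, Reflect)]
#[reflect(Debug, PartialEq, Default, Clone)]
struct MyPrimitive {
    radius: f32,
}

fn main() {
    let a = MyPrimitive { radius: 1.0 };
    let b = a.clone();
    assert_eq!(a, b);
}
```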
#[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Deserialize, Serialize) @@ -54,7 +58,11 @@ impl Ray2d { /// An infinite half-line starting at `origin` and going in `direction` in 3D space. #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Deserialize, Serialize) diff --git a/crates/bevy_math/src/rects/irect.rs b/crates/bevy_math/src/rects/irect.rs index 73e830f085..74da994b36 100644 --- a/crates/bevy_math/src/rects/irect.rs +++ b/crates/bevy_math/src/rects/irect.rs @@ -19,7 +19,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Hash, Default) + reflect(Debug, PartialEq, Hash, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/rects/rect.rs b/crates/bevy_math/src/rects/rect.rs index 901a569a71..92b7059945 100644 --- a/crates/bevy_math/src/rects/rect.rs +++ b/crates/bevy_math/src/rects/rect.rs @@ -19,7 +19,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/rects/urect.rs b/crates/bevy_math/src/rects/urect.rs index 5412750465..9d19c5ae7c 100644 --- a/crates/bevy_math/src/rects/urect.rs +++ b/crates/bevy_math/src/rects/urect.rs @@ -19,7 +19,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Hash, Default) + reflect(Debug, PartialEq, Hash, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/rotation2d.rs b/crates/bevy_math/src/rotation2d.rs index 40760dcb84..1320f6363a 100644 --- a/crates/bevy_math/src/rotation2d.rs +++ b/crates/bevy_math/src/rotation2d.rs @@ -30,9 +30,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// assert_eq!(rotation2.as_radians(), PI / 4.0); /// /// // "Add" rotations together using `*` +/// #[cfg(feature = "approx")] /// assert_relative_eq!(rotation1 * rotation2, Rot2::degrees(135.0)); /// /// // Rotate vectors +/// #[cfg(feature = "approx")] /// assert_relative_eq!(rotation1 * Vec2::X, Vec2::Y); /// ``` #[derive(Clone, Copy, Debug, PartialEq)] @@ -40,7 +42,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -116,9 +118,11 @@ impl Rot2 { /// /// let rot1 = Rot2::radians(3.0 * FRAC_PI_2); /// let rot2 = Rot2::radians(-FRAC_PI_2); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1, rot2); /// /// let rot3 = 
Rot2::radians(PI); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1 * rot1, rot3); /// ``` #[inline] @@ -141,9 +145,11 @@ impl Rot2 { /// /// let rot1 = Rot2::degrees(270.0); /// let rot2 = Rot2::degrees(-90.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1, rot2); /// /// let rot3 = Rot2::degrees(180.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1 * rot1, rot3); /// ``` #[inline] @@ -165,9 +171,11 @@ impl Rot2 { /// /// let rot1 = Rot2::turn_fraction(0.75); /// let rot2 = Rot2::turn_fraction(-0.25); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1, rot2); /// /// let rot3 = Rot2::turn_fraction(0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1 * rot1, rot3); /// ``` #[inline] diff --git a/crates/bevy_math/src/sampling/shape_sampling.rs b/crates/bevy_math/src/sampling/shape_sampling.rs index d1371114bd..3be0ead1da 100644 --- a/crates/bevy_math/src/sampling/shape_sampling.rs +++ b/crates/bevy_math/src/sampling/shape_sampling.rs @@ -234,7 +234,7 @@ impl ShapeSample for Rectangle { fn sample_boundary(&self, rng: &mut R) -> Vec2 { let primary_side = rng.gen_range(-1.0..1.0); - let other_side = if rng.gen() { -1.0 } else { 1.0 }; + let other_side = if rng.r#gen() { -1.0 } else { 1.0 }; if self.half_size.x + self.half_size.y > 0.0 { if rng.gen_bool((self.half_size.x / (self.half_size.x + self.half_size.y)) as f64) { @@ -261,7 +261,7 @@ impl ShapeSample for Cuboid { fn sample_boundary(&self, rng: &mut R) -> Vec3 { let primary_side1 = rng.gen_range(-1.0..1.0); let primary_side2 = rng.gen_range(-1.0..1.0); - let other_side = if rng.gen() { -1.0 } else { 1.0 }; + let other_side = if rng.r#gen() { -1.0 } else { 1.0 }; if let Ok(dist) = WeightedIndex::new([ self.half_size.y * self.half_size.z, @@ -425,7 +425,7 @@ impl ShapeSample for Cylinder { if self.radius + 2.0 * self.half_height > 0.0 { if rng.gen_bool((self.radius / (self.radius + 2.0 * self.half_height)) as f64) { let Vec2 { x, y: z } = self.base().sample_interior(rng); - if rng.gen() { + if rng.r#gen() { Vec3::new(x, self.half_height, z) } else { Vec3::new(x, -self.half_height, z) diff --git a/crates/bevy_math/src/sampling/standard.rs b/crates/bevy_math/src/sampling/standard.rs index 6750d5c6d5..d4e82fdc81 100644 --- a/crates/bevy_math/src/sampling/standard.rs +++ b/crates/bevy_math/src/sampling/standard.rs @@ -12,7 +12,7 @@ //! let random_direction1: Dir3 = random(); //! //! // Random direction using the rng constructed above -//! let random_direction2: Dir3 = rng.gen(); +//! let random_direction2: Dir3 = rng.r#gen(); //! //! // The same as the previous but with different syntax //! let random_direction3 = Dir3::from_rng(&mut rng); @@ -49,7 +49,7 @@ where { /// Construct a value of this type uniformly at random using `rng` as the source of randomness. 
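The `rng.gen()` to `rng.r#gen()` edits are purely syntactic: `gen` becomes a reserved keyword in the Rust 2024 edition (which several manifests in this diff migrate to), so the `rand` method has to be called through a raw identifier. A minimal sketch:

```rust
use rand::{rngs::StdRng, Rng, SeedableRng};

fn main() {
    let mut rng = StdRng::seed_from_u64(42);
    // Under edition 2024 `gen` is reserved, so `Rng::gen` is spelled `r#gen`.
    let sign = if rng.r#gen() { 1.0 } else { -1.0 };
    let value: f32 = rng.r#gen();
    println!("{}", sign * value);
}
```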
fn from_rng(rng: &mut R) -> Self { - rng.gen() + rng.r#gen() } } diff --git a/crates/bevy_mesh/Cargo.toml b/crates/bevy_mesh/Cargo.toml index ffcc2f72e2..2ccb65cdb4 100644 --- a/crates/bevy_mesh/Cargo.toml +++ b/crates/bevy_mesh/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_mesh" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides mesh types for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -13,15 +13,13 @@ keywords = ["bevy"] bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_mikktspace = { path = "../bevy_mikktspace", version = "0.16.0-dev" } bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", "serialize", ] } diff --git a/crates/bevy_mesh/src/index.rs b/crates/bevy_mesh/src/index.rs index d9593543a8..d2497e2c50 100644 --- a/crates/bevy_mesh/src/index.rs +++ b/crates/bevy_mesh/src/index.rs @@ -70,6 +70,7 @@ pub enum MeshTrianglesError { /// /// It describes the order in which the vertex attributes should be joined into faces. #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub enum Indices { U16(Vec), U32(Vec), diff --git a/crates/bevy_mesh/src/lib.rs b/crates/bevy_mesh/src/lib.rs index 83bc30df35..58702d7d8b 100644 --- a/crates/bevy_mesh/src/lib.rs +++ b/crates/bevy_mesh/src/lib.rs @@ -17,6 +17,7 @@ pub use mesh::*; pub use mikktspace::*; pub use primitives::*; pub use vertex::*; +pub use wgpu_types::VertexFormat; bitflags! { /// Our base mesh pipeline key bits start from the highest bit and go diff --git a/crates/bevy_mesh/src/mesh.rs b/crates/bevy_mesh/src/mesh.rs index a07924df2d..e4868dbf69 100644 --- a/crates/bevy_mesh/src/mesh.rs +++ b/crates/bevy_mesh/src/mesh.rs @@ -85,34 +85,35 @@ pub const VERTEX_ATTRIBUTE_BUFFER_ID: u64 = 10; /// ## Common points of confusion /// /// - UV maps in Bevy start at the top-left, see [`ATTRIBUTE_UV_0`](Mesh::ATTRIBUTE_UV_0), -/// other APIs can have other conventions, `OpenGL` starts at bottom-left. +/// other APIs can have other conventions, `OpenGL` starts at bottom-left. /// - It is possible and sometimes useful for multiple vertices to have the same -/// [position attribute](Mesh::ATTRIBUTE_POSITION) value, -/// it's a common technique in 3D modeling for complex UV mapping or other calculations. +/// [position attribute](Mesh::ATTRIBUTE_POSITION) value, +/// it's a common technique in 3D modeling for complex UV mapping or other calculations. /// - Bevy performs frustum culling based on the `Aabb` of meshes, which is calculated -/// and added automatically for new meshes only. If a mesh is modified, the entity's `Aabb` -/// needs to be updated manually or deleted so that it is re-calculated. +/// and added automatically for new meshes only. 
If a mesh is modified, the entity's `Aabb` +/// needs to be updated manually or deleted so that it is re-calculated. /// /// ## Use with `StandardMaterial` /// /// To render correctly with `StandardMaterial`, a mesh needs to have properly defined: /// - [`UVs`](Mesh::ATTRIBUTE_UV_0): Bevy needs to know how to map a texture onto the mesh -/// (also true for `ColorMaterial`). +/// (also true for `ColorMaterial`). /// - [`Normals`](Mesh::ATTRIBUTE_NORMAL): Bevy needs to know how light interacts with your mesh. -/// [0.0, 0.0, 1.0] is very common for simple flat meshes on the XY plane, -/// because simple meshes are smooth and they don't require complex light calculations. +/// [0.0, 0.0, 1.0] is very common for simple flat meshes on the XY plane, +/// because simple meshes are smooth and they don't require complex light calculations. /// - Vertex winding order: by default, `StandardMaterial.cull_mode` is `Some(Face::Back)`, -/// which means that Bevy would *only* render the "front" of each triangle, which -/// is the side of the triangle from where the vertices appear in a *counter-clockwise* order. +/// which means that Bevy would *only* render the "front" of each triangle, which +/// is the side of the triangle from where the vertices appear in a *counter-clockwise* order. #[derive(Asset, Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct Mesh { - #[reflect(ignore)] + #[reflect(ignore, clone)] primitive_topology: PrimitiveTopology, /// `std::collections::BTreeMap` with all defined vertex attributes (Positions, Normals, ...) /// for this mesh. Attribute ids to attribute values. /// Uses a [`BTreeMap`] because, unlike `HashMap`, it has a defined iteration order, /// which allows easy stable `VertexBuffers` (i.e. same buffer order) - #[reflect(ignore)] + #[reflect(ignore, clone)] attributes: BTreeMap, indices: Option, morph_targets: Option>, @@ -881,7 +882,7 @@ impl Mesh { "mesh transform scale cannot be zero on more than one axis" ); - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply scale, rotation, and translation to vertex positions @@ -898,7 +899,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut normals)) = + if let Some(VertexAttributeValues::Float32x3(normals)) = self.attribute_mut(Mesh::ATTRIBUTE_NORMAL) { // Transform normals, taking into account non-uniform scaling and rotation @@ -909,13 +910,16 @@ impl Mesh { }); } - if let Some(VertexAttributeValues::Float32x3(ref mut tangents)) = + if let Some(VertexAttributeValues::Float32x4(tangents)) = self.attribute_mut(Mesh::ATTRIBUTE_TANGENT) { // Transform tangents, taking into account non-uniform scaling and rotation tangents.iter_mut().for_each(|tangent| { + let handedness = tangent[3]; let scaled_tangent = Vec3::from_slice(tangent) * transform.scale; - *tangent = (transform.rotation * scaled_tangent.normalize_or_zero()).to_array(); + *tangent = (transform.rotation * scaled_tangent.normalize_or_zero()) + .extend(handedness) + .to_array(); }); } } @@ -936,7 +940,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply translation to vertex positions @@ -958,7 +962,7 @@ impl Mesh { /// /// `Aabb` of entities with modified mesh are not updated automatically. 
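The tangent hunks fix two things at once: tangents are now matched as `Float32x4` rather than `Float32x3`, and the fourth component, the handedness sign used to reconstruct the bitangent, survives the transform instead of being dropped. A stand-alone sketch of the per-tangent update (the helper name is mine):

```rust
use glam::{Quat, Vec3};

// Rotate a mesh tangent stored as [x, y, z, w], where w is the ±1 handedness sign.
fn rotate_tangent(tangent: [f32; 4], rotation: Quat) -> [f32; 4] {
    let handedness = tangent[3];
    let rotated = rotation * Vec3::from_slice(&tangent).normalize_or_zero();
    // Only the direction is rotated; the handedness passes through untouched.
    rotated.extend(handedness).to_array()
}

fn main() {
    let rotated = rotate_tangent(
        [1.0, 0.0, 0.0, -1.0],
        Quat::from_rotation_z(core::f32::consts::FRAC_PI_2),
    );
    println!("{rotated:?}"); // roughly [0.0, 1.0, 0.0, -1.0]
}
```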
pub fn rotate_by(&mut self, rotation: Quat) { - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply rotation to vertex positions @@ -972,7 +976,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut normals)) = + if let Some(VertexAttributeValues::Float32x3(normals)) = self.attribute_mut(Mesh::ATTRIBUTE_NORMAL) { // Transform normals @@ -981,12 +985,15 @@ impl Mesh { }); } - if let Some(VertexAttributeValues::Float32x3(ref mut tangents)) = + if let Some(VertexAttributeValues::Float32x4(tangents)) = self.attribute_mut(Mesh::ATTRIBUTE_TANGENT) { // Transform tangents tangents.iter_mut().for_each(|tangent| { - *tangent = (rotation * Vec3::from_slice(tangent).normalize_or_zero()).to_array(); + let handedness = tangent[3]; + *tangent = (rotation * Vec3::from_slice(tangent).normalize_or_zero()) + .extend(handedness) + .to_array(); }); } } @@ -1010,7 +1017,7 @@ impl Mesh { "mesh transform scale cannot be zero on more than one axis" ); - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply scale to vertex positions @@ -1024,7 +1031,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut normals)) = + if let Some(VertexAttributeValues::Float32x3(normals)) = self.attribute_mut(Mesh::ATTRIBUTE_NORMAL) { // Transform normals, taking into account non-uniform scaling @@ -1033,13 +1040,17 @@ impl Mesh { }); } - if let Some(VertexAttributeValues::Float32x3(ref mut tangents)) = + if let Some(VertexAttributeValues::Float32x4(tangents)) = self.attribute_mut(Mesh::ATTRIBUTE_TANGENT) { // Transform tangents, taking into account non-uniform scaling tangents.iter_mut().for_each(|tangent| { + let handedness = tangent[3]; let scaled_tangent = Vec3::from_slice(tangent) * scale; - *tangent = scaled_tangent.normalize_or_zero().to_array(); + *tangent = scaled_tangent + .normalize_or_zero() + .extend(handedness) + .to_array(); }); } } @@ -1096,7 +1107,7 @@ impl Mesh { /// Normalize joint weights so they sum to 1. 
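`normalize_joint_weights`, whose body follows, rescales each vertex's joint weights so they sum to one, which skinning assumes. Per vertex it amounts to something like this simplified sketch (not the actual implementation):

```rust
// Rescale one vertex's four joint weights so they sum to 1.
fn normalize(weights: &mut [f32; 4]) {
    let sum: f32 = weights.iter().sum();
    if sum > 0.0 {
        weights.iter_mut().for_each(|w| *w /= sum);
    }
}

fn main() {
    let mut weights = [2.0, 1.0, 1.0, 0.0];
    normalize(&mut weights);
    assert_eq!(weights, [0.5, 0.25, 0.25, 0.0]);
}
```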
pub fn normalize_joint_weights(&mut self) { if let Some(joints) = self.attribute_mut(Self::ATTRIBUTE_JOINT_WEIGHT) { - let VertexAttributeValues::Float32x4(ref mut joints) = joints else { + let VertexAttributeValues::Float32x4(joints) = joints else { panic!("unexpected joint weight format"); }; diff --git a/crates/bevy_mesh/src/morph.rs b/crates/bevy_mesh/src/morph.rs index 0c2c2c948c..a8ff3be037 100644 --- a/crates/bevy_mesh/src/morph.rs +++ b/crates/bevy_mesh/src/morph.rs @@ -5,7 +5,6 @@ use bevy_image::Image; use bevy_math::Vec3; use bevy_reflect::prelude::*; use bytemuck::{Pod, Zeroable}; -use core::iter; use thiserror::Error; use wgpu_types::{Extent3d, TextureDimension, TextureFormat}; @@ -77,7 +76,7 @@ impl MorphTargetImage { buffer.extend_from_slice(bytemuck::bytes_of(&to_add)); } // Pad each layer so that they fit width * height - buffer.extend(iter::repeat(0).take(padding as usize * size_of::())); + buffer.extend(core::iter::repeat_n(0, padding as usize * size_of::())); debug_assert_eq!(buffer.len(), layer_byte_count); buffer }) @@ -112,7 +111,7 @@ impl MorphTargetImage { /// /// [morph targets]: https://en.wikipedia.org/wiki/Morph_target_animation #[derive(Reflect, Default, Debug, Clone, Component)] -#[reflect(Debug, Component, Default)] +#[reflect(Debug, Component, Default, Clone)] pub struct MorphWeights { weights: Vec, /// The first mesh primitive assigned to these weights @@ -157,7 +156,7 @@ impl MorphWeights { /// /// [morph targets]: https://en.wikipedia.org/wiki/Morph_target_animation #[derive(Reflect, Default, Debug, Clone, Component)] -#[reflect(Debug, Component, Default)] +#[reflect(Debug, Component, Default, Clone)] pub struct MeshMorphWeights { weights: Vec, } diff --git a/crates/bevy_mesh/src/primitives/dim2.rs b/crates/bevy_mesh/src/primitives/dim2.rs index 16440e9b00..e543f8a195 100644 --- a/crates/bevy_mesh/src/primitives/dim2.rs +++ b/crates/bevy_mesh/src/primitives/dim2.rs @@ -17,7 +17,7 @@ use wgpu_types::PrimitiveTopology; /// A builder used for creating a [`Mesh`] with a [`Circle`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct CircleMeshBuilder { /// The [`Circle`] shape. pub circle: Circle, @@ -101,7 +101,7 @@ impl From for Mesh { /// scaled to fit the bounding box of the shape, which would be good for packed textures only including the /// portion of the circle that is needed to display. #[derive(Copy, Clone, Debug, PartialEq, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] #[non_exhaustive] pub enum CircularMeshUvMode { /// Treats the shape as a mask over a circle of equal size and radius, @@ -123,7 +123,7 @@ impl Default for CircularMeshUvMode { /// The resulting mesh will have a UV-map such that the center of the circle is /// at the center of the texture. #[derive(Clone, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct CircularSectorMeshBuilder { /// The sector shape. pub sector: CircularSector, @@ -261,7 +261,7 @@ impl From for Mesh { /// The resulting mesh will have a UV-map such that the center of the circle is /// at the center of the texture. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct CircularSegmentMeshBuilder { /// The segment shape. pub segment: CircularSegment, @@ -408,7 +408,7 @@ impl From for Mesh { /// You must verify that the `vertices` are not concave when constructing this type. 
You can /// guarantee this by creating a [`ConvexPolygon`] first, then calling [`ConvexPolygon::mesh()`]. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Clone)] pub struct ConvexPolygonMeshBuilder { pub vertices: [Vec2; N], } @@ -459,7 +459,7 @@ impl From> for Mesh { /// A builder used for creating a [`Mesh`] with a [`RegularPolygon`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct RegularPolygonMeshBuilder { circumradius: f32, sides: u32, @@ -533,7 +533,7 @@ impl From for Mesh { /// A builder used for creating a [`Mesh`] with an [`Ellipse`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct EllipseMeshBuilder { /// The [`Ellipse`] shape. pub ellipse: Ellipse, @@ -638,7 +638,7 @@ impl From for Mesh { /// A builder for creating a [`Mesh`] with an [`Annulus`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct AnnulusMeshBuilder { /// The [`Annulus`] shape. pub annulus: Annulus, @@ -771,7 +771,7 @@ impl From for Mesh { /// A builder for creating a [`Mesh`] with an [`Rhombus`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct RhombusMeshBuilder { half_diagonals: Vec2, } @@ -857,7 +857,7 @@ impl From for Mesh { /// A builder used for creating a [`Mesh`] with a [`Triangle2d`] shape. #[derive(Clone, Copy, Debug, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct Triangle2dMeshBuilder { triangle: Triangle2d, } @@ -934,7 +934,7 @@ impl From for Mesh { /// A builder used for creating a [`Mesh`] with a [`Rectangle`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct RectangleMeshBuilder { half_size: Vec2, } @@ -1014,7 +1014,7 @@ impl From for Mesh { /// A builder used for creating a [`Mesh`] with a [`Capsule2d`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct Capsule2dMeshBuilder { /// The [`Capsule2d`] shape. pub capsule: Capsule2d, @@ -1176,7 +1176,7 @@ impl From for Mesh { #[cfg(test)] mod tests { use bevy_math::{prelude::Annulus, primitives::RegularPolygon, FloatOrd}; - use bevy_platform_support::collections::HashSet; + use bevy_platform::collections::HashSet; use crate::{Mesh, MeshBuilder, Meshable, VertexAttributeValues}; diff --git a/crates/bevy_mesh/src/primitives/dim3/capsule.rs b/crates/bevy_mesh/src/primitives/dim3/capsule.rs index 81c7f77306..f46ebce0d1 100644 --- a/crates/bevy_mesh/src/primitives/dim3/capsule.rs +++ b/crates/bevy_mesh/src/primitives/dim3/capsule.rs @@ -5,7 +5,7 @@ use bevy_reflect::prelude::*; /// Manner in which UV coordinates are distributed vertically. #[derive(Clone, Copy, Debug, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum CapsuleUvProfile { /// UV space is distributed by how much of the capsule consists of the hemispheres. #[default] @@ -19,7 +19,7 @@ pub enum CapsuleUvProfile { /// A builder used for creating a [`Mesh`] with a [`Capsule3d`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct Capsule3dMeshBuilder { /// The [`Capsule3d`] shape. 
pub capsule: Capsule3d, diff --git a/crates/bevy_mesh/src/primitives/dim3/cone.rs b/crates/bevy_mesh/src/primitives/dim3/cone.rs index fde4425370..d06a57f832 100644 --- a/crates/bevy_mesh/src/primitives/dim3/cone.rs +++ b/crates/bevy_mesh/src/primitives/dim3/cone.rs @@ -5,7 +5,7 @@ use bevy_reflect::prelude::*; /// Anchoring options for [`ConeMeshBuilder`] #[derive(Debug, Copy, Clone, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum ConeAnchor { #[default] /// Midpoint between the tip of the cone and the center of its base. @@ -18,7 +18,7 @@ pub enum ConeAnchor { /// A builder used for creating a [`Mesh`] with a [`Cone`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct ConeMeshBuilder { /// The [`Cone`] shape. pub cone: Cone, diff --git a/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs b/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs index a90e9f972d..8c69378c01 100644 --- a/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs +++ b/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs @@ -5,7 +5,7 @@ use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`ConicalFrustum`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct ConicalFrustumMeshBuilder { /// The [`ConicalFrustum`] shape. pub frustum: ConicalFrustum, diff --git a/crates/bevy_mesh/src/primitives/dim3/cuboid.rs b/crates/bevy_mesh/src/primitives/dim3/cuboid.rs index 30689ab131..40a7cd45d4 100644 --- a/crates/bevy_mesh/src/primitives/dim3/cuboid.rs +++ b/crates/bevy_mesh/src/primitives/dim3/cuboid.rs @@ -5,7 +5,7 @@ use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Cuboid`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct CuboidMeshBuilder { half_size: Vec3, } diff --git a/crates/bevy_mesh/src/primitives/dim3/cylinder.rs b/crates/bevy_mesh/src/primitives/dim3/cylinder.rs index 3c06caea84..7b1b45974e 100644 --- a/crates/bevy_mesh/src/primitives/dim3/cylinder.rs +++ b/crates/bevy_mesh/src/primitives/dim3/cylinder.rs @@ -5,7 +5,7 @@ use bevy_reflect::prelude::*; /// Anchoring options for [`CylinderMeshBuilder`] #[derive(Debug, Copy, Clone, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum CylinderAnchor { #[default] /// Midpoint between the top and bottom caps of the cylinder @@ -18,7 +18,7 @@ pub enum CylinderAnchor { /// A builder used for creating a [`Mesh`] with a [`Cylinder`] shape. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct CylinderMeshBuilder { /// The [`Cylinder`] shape. pub cylinder: Cylinder, diff --git a/crates/bevy_mesh/src/primitives/dim3/plane.rs b/crates/bevy_mesh/src/primitives/dim3/plane.rs index 9a2621a163..fd892469be 100644 --- a/crates/bevy_mesh/src/primitives/dim3/plane.rs +++ b/crates/bevy_mesh/src/primitives/dim3/plane.rs @@ -5,7 +5,7 @@ use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Plane3d`] shape. #[derive(Clone, Copy, Debug, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct PlaneMeshBuilder { /// The [`Plane3d`] shape. 
pub plane: Plane3d, diff --git a/crates/bevy_mesh/src/primitives/dim3/sphere.rs b/crates/bevy_mesh/src/primitives/dim3/sphere.rs index 686887b13e..6ae8eec5ed 100644 --- a/crates/bevy_mesh/src/primitives/dim3/sphere.rs +++ b/crates/bevy_mesh/src/primitives/dim3/sphere.rs @@ -21,7 +21,7 @@ pub enum IcosphereError { /// A type of sphere mesh. #[derive(Clone, Copy, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum SphereKind { /// An icosphere, a spherical mesh that consists of similar sized triangles. Ico { @@ -49,7 +49,7 @@ impl Default for SphereKind { /// A builder used for creating a [`Mesh`] with an [`Sphere`] shape. #[derive(Clone, Copy, Debug, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct SphereMeshBuilder { /// The [`Sphere`] shape. pub sphere: Sphere, diff --git a/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs b/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs index dcb88a758e..529805d9a6 100644 --- a/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs +++ b/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs @@ -6,7 +6,7 @@ use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Tetrahedron`] shape. #[derive(Clone, Copy, Debug, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct TetrahedronMeshBuilder { tetrahedron: Tetrahedron, } diff --git a/crates/bevy_mesh/src/primitives/dim3/torus.rs b/crates/bevy_mesh/src/primitives/dim3/torus.rs index 6d9c802e6c..6f370c1341 100644 --- a/crates/bevy_mesh/src/primitives/dim3/torus.rs +++ b/crates/bevy_mesh/src/primitives/dim3/torus.rs @@ -6,7 +6,7 @@ use core::ops::RangeInclusive; /// A builder used for creating a [`Mesh`] with a [`Torus`] shape. #[derive(Clone, Debug, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct TorusMeshBuilder { /// The [`Torus`] shape. pub torus: Torus, diff --git a/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs b/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs index f605dbeaf1..e35f272ab9 100644 --- a/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs +++ b/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs @@ -5,7 +5,7 @@ use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Triangle3d`] shape. #[derive(Clone, Copy, Debug, Default, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct Triangle3dMeshBuilder { triangle: Triangle3d, } diff --git a/crates/bevy_mesh/src/skinning.rs b/crates/bevy_mesh/src/skinning.rs index f55f51af10..53b93f9ff2 100644 --- a/crates/bevy_mesh/src/skinning.rs +++ b/crates/bevy_mesh/src/skinning.rs @@ -1,17 +1,27 @@ -use bevy_asset::{Asset, Handle}; +use bevy_asset::{AsAssetId, Asset, AssetId, Handle}; use bevy_ecs::{component::Component, entity::Entity, prelude::ReflectComponent}; use bevy_math::Mat4; use bevy_reflect::prelude::*; use core::ops::Deref; #[derive(Component, Debug, Default, Clone, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct SkinnedMesh { pub inverse_bindposes: Handle, #[entities] pub joints: Vec, } +impl AsAssetId for SkinnedMesh { + type Asset = SkinnedMeshInverseBindposes; + + // We implement this so that `AssetChanged` will work to pick up any changes + // to `SkinnedMeshInverseBindposes`. 
+ fn as_asset_id(&self) -> AssetId { + self.inverse_bindposes.id() + } +} + #[derive(Asset, TypePath, Debug)] pub struct SkinnedMeshInverseBindposes(Box<[Mat4]>); diff --git a/crates/bevy_mesh/src/vertex.rs b/crates/bevy_mesh/src/vertex.rs index 253c04af45..949e355b4c 100644 --- a/crates/bevy_mesh/src/vertex.rs +++ b/crates/bevy_mesh/src/vertex.rs @@ -2,7 +2,7 @@ use alloc::sync::Arc; use bevy_derive::EnumVariantMeta; use bevy_ecs::resource::Resource; use bevy_math::Vec3; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bytemuck::cast_slice; use core::hash::{Hash, Hasher}; use thiserror::Error; diff --git a/crates/bevy_mikktspace/Cargo.toml b/crates/bevy_mikktspace/Cargo.toml index 0ab431aa8f..fbca931fe2 100644 --- a/crates/bevy_mikktspace/Cargo.toml +++ b/crates/bevy_mikktspace/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_mikktspace" version = "0.16.0-dev" -edition = "2021" +edition = "2024" authors = [ "Benjamin Wasty ", "David Harvey-Macaulay ", @@ -13,7 +13,7 @@ homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "Zlib AND (MIT OR Apache-2.0)" keywords = ["bevy", "3D", "graphics", "algorithm", "tangent"] -rust-version = "1.76.0" +rust-version = "1.85.0" [features] default = ["std"] @@ -22,7 +22,7 @@ std = ["glam/std"] libm = ["glam/libm", "dep:libm"] [dependencies] -glam = { version = "0.29.0", default-features = false } +glam = { version = "0.29.3", default-features = false } libm = { version = "0.2", default-features = false, optional = true } [[example]] diff --git a/crates/bevy_mikktspace/README.md b/crates/bevy_mikktspace/README.md index a9049ee1ba..6ad4a72a4f 100644 --- a/crates/bevy_mikktspace/README.md +++ b/crates/bevy_mikktspace/README.md @@ -6,7 +6,7 @@ [![Docs](https://docs.rs/bevy_mikktspace/badge.svg)](https://docs.rs/bevy_mikktspace/latest/bevy_mikktspace/) [![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) -This is a fork of [https://github.com/gltf-rs/mikktspace](https://github.com/gltf-rs/mikktspace), which in turn is a port of the Mikkelsen Tangent Space Algorithm reference implementation to Rust. It has been forked for use in the bevy game engine to be able to update maths crate dependencies in lock-step with bevy releases. It is vendored in the bevy repository itself as [crates/bevy_mikktspace](https://github.com/bevyengine/bevy/tree/main/crates/bevy_mikktspace). +This is a fork of [https://github.com/gltf-rs/mikktspace](https://github.com/gltf-rs/mikktspace), which in turn is a port of the Mikkelsen Tangent Space Algorithm reference implementation to Rust. It has been forked for use in the bevy game engine to be able to update math crate dependencies in lock-step with bevy releases. It is vendored in the bevy repository itself as [crates/bevy_mikktspace](https://github.com/bevyengine/bevy/tree/main/crates/bevy_mikktspace). Port of the [Mikkelsen Tangent Space Algorithm](https://archive.blender.org/wiki/2015/index.php/Dev:Shading/Tangent_Space_Normal_Maps/) reference implementation. diff --git a/crates/bevy_mikktspace/examples/generate.rs b/crates/bevy_mikktspace/examples/generate.rs index 6ca3fa36df..62f6f10bfa 100644 --- a/crates/bevy_mikktspace/examples/generate.rs +++ b/crates/bevy_mikktspace/examples/generate.rs @@ -1,6 +1,11 @@ //! This example demonstrates how to generate a mesh. 
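The `AsAssetId` impl for `SkinnedMesh` above is a small, reusable pattern: any component wrapping a handle can implement it so `AssetChanged`-style query filters react when the referenced asset changes. A rough sketch with a hypothetical component and asset (the names are mine; the associated type and `AssetId` return shape follow the hunk above):

```rust
use bevy_asset::{AsAssetId, Asset, AssetId, Handle};
use bevy_ecs::component::Component;
use bevy_reflect::TypePath;

#[derive(Asset, TypePath)]
struct Profile {
    strength: f32,
}

#[derive(Component)]
struct ProfileRef(Handle<Profile>);

impl AsAssetId for ProfileRef {
    type Asset = Profile;

    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        self.0.id()
    }
}
```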
-#![allow(clippy::bool_assert_comparison, clippy::useless_conversion)] +#![allow( + clippy::bool_assert_comparison, + clippy::useless_conversion, + reason = "Crate auto-generated with many non-idiomatic decisions. See #7372 for details." +)] +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] use glam::{Vec2, Vec3}; diff --git a/crates/bevy_mikktspace/src/generated.rs b/crates/bevy_mikktspace/src/generated.rs index a35e1d205f..a726eb5bc8 100644 --- a/crates/bevy_mikktspace/src/generated.rs +++ b/crates/bevy_mikktspace/src/generated.rs @@ -95,7 +95,7 @@ impl STSpace { // of the vertex shader, as explained earlier, then be sure to do this in the normal map sampler also. // Finally, beware of quad triangulations. If the normal map sampler doesn't use the same triangulation of // quads as your renderer then problems will occur since the interpolated tangent spaces will differ -// eventhough the vertex level tangent spaces match. This can be solved either by triangulating before +// even though the vertex level tangent spaces match. This can be solved either by triangulating before // sampling/exporting or by using the order-independent choice of diagonal for splitting quads suggested earlier. // However, this must be used both by the sampler and your tools/rendering pipeline. // internal structure @@ -136,7 +136,7 @@ pub struct SGroup { pub iNrFaces: i32, pub pFaceIndices: *mut i32, pub iVertexRepresentative: i32, - pub bOrientPreservering: bool, + pub bOrientPreserving: bool, } impl SGroup { @@ -145,7 +145,7 @@ impl SGroup { iNrFaces: 0, pFaceIndices: null_mut(), iVertexRepresentative: 0, - bOrientPreservering: false, + bOrientPreserving: false, } } } @@ -576,11 +576,11 @@ unsafe fn GenerateTSpaces( if (*pTS_out).iCounter == 1i32 { *pTS_out = AvgTSpace(pTS_out, &mut pSubGroupTspace[l]); (*pTS_out).iCounter = 2i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering + (*pTS_out).bOrient = (*pGroup).bOrientPreserving } else { *pTS_out = pSubGroupTspace[l]; (*pTS_out).iCounter = 1i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering + (*pTS_out).bOrient = (*pGroup).bOrientPreserving } i += 1 } @@ -838,7 +838,7 @@ unsafe fn Build4RuleGroups( *fresh2 = ptr::from_mut(&mut *pGroups.offset(iNrActiveGroups as isize)); (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) .iVertexRepresentative = vert_index; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).bOrientPreservering = + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).bOrientPreserving = (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32; (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces = 0i32; let ref mut fresh3 = @@ -927,7 +927,7 @@ unsafe fn AssignRecur( && (*pMyTriInfo).AssignedGroup[2usize].is_null() { (*pMyTriInfo).iFlag &= !8i32; - (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreservering { + (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreserving { 8i32 } else { 0i32 @@ -939,7 +939,7 @@ unsafe fn AssignRecur( } else { false }; - if bOrient != (*pGroup).bOrientPreservering { + if bOrient != (*pGroup).bOrientPreserving { return false; } AddTriToGroup(pGroup, iMyTriIndex); diff --git a/crates/bevy_mikktspace/tests/regression_test.rs b/crates/bevy_mikktspace/tests/regression_test.rs index a0632b76e2..bd6718ad39 100644 --- a/crates/bevy_mikktspace/tests/regression_test.rs +++ b/crates/bevy_mikktspace/tests/regression_test.rs @@ -2,7 +2,8 @@ #![expect( clippy::bool_assert_comparison, clippy::semicolon_if_nothing_returned, - clippy::useless_conversion + 
clippy::useless_conversion, + reason = "Crate auto-generated with many non-idiomatic decisions. See #7372 for details." )] use bevy_mikktspace::{generate_tangents, Geometry}; diff --git a/crates/bevy_pbr/Cargo.toml b/crates/bevy_pbr/Cargo.toml index bf568885e7..82642812b4 100644 --- a/crates/bevy_pbr/Cargo.toml +++ b/crates/bevy_pbr/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_pbr" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Adds PBR rendering to Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -18,7 +18,6 @@ experimental_pbr_pcss = [] pbr_specular_textures = [] shader_format_glsl = ["bevy_render/shader_format_glsl"] trace = ["bevy_render/trace"] -ios_simulator = ["bevy_render/ios_simulator"] # Enables the meshlet renderer for dense high-poly scenes (experimental) meshlet = ["dep:lz4_flex", "dep:range-alloc", "dep:half", "dep:bevy_tasks"] # Enables processing meshes into meshlet meshes @@ -37,18 +36,17 @@ bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.16.0-dev" } bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", optional = true } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", ] } @@ -65,7 +63,7 @@ range-alloc = { version = "0.1.3", optional = true } half = { version = "2", features = ["bytemuck"], optional = true } meshopt = { version = "0.4.1", optional = true } metis = { version = "0.2", optional = true } -itertools = { version = "0.13", optional = true } +itertools = { version = "0.14", optional = true } bitvec = { version = "1", optional = true } # direct dependency required for derive macro bytemuck = { version = "1", features = ["derive", "must_cast"] } @@ -74,6 +72,7 @@ smallvec = "1.6" nonmax = "0.5" static_assertions = "1" tracing = { version = "0.1", default-features = false, features = ["std"] } +offset-allocator = "0.2" [lints] workspace = true diff --git a/crates/bevy_pbr/src/atmosphere/aerial_view_lut.wgsl b/crates/bevy_pbr/src/atmosphere/aerial_view_lut.wgsl index c353298458..f7ba0ecb60 100644 --- a/crates/bevy_pbr/src/atmosphere/aerial_view_lut.wgsl +++ b/crates/bevy_pbr/src/atmosphere/aerial_view_lut.wgsl @@ -55,12 +55,9 @@ fn main(@builtin(global_invocation_id) idx: vec3) { break; } } - // We only have one channel to store transmittance, so we store the mean - let mean_transmittance = (throughput.r + throughput.g + throughput.b) / 3.0; // Store in log space to allow linear interpolation of 
exponential values between slices - let log_transmittance = -log(max(mean_transmittance, 1e-6)); // Avoid log(0) let log_inscattering = log(max(total_inscattering, vec3(1e-6))); - textureStore(aerial_view_lut_out, vec3(vec2(idx.xy), slice_i), vec4(log_inscattering, log_transmittance)); + textureStore(aerial_view_lut_out, vec3(vec2(idx.xy), slice_i), vec4(log_inscattering, 0.0)); } } diff --git a/crates/bevy_pbr/src/atmosphere/functions.wgsl b/crates/bevy_pbr/src/atmosphere/functions.wgsl index ffe3859b3f..c1f02fc921 100644 --- a/crates/bevy_pbr/src/atmosphere/functions.wgsl +++ b/crates/bevy_pbr/src/atmosphere/functions.wgsl @@ -49,7 +49,7 @@ const ROOT_2: f32 = 1.41421356; // √2 // the exponential falloff of atmospheric density. const MIDPOINT_RATIO: f32 = 0.3; -// LUT UV PARAMATERIZATIONS +// LUT UV PARAMETERIZATIONS fn unit_to_sub_uvs(val: vec2, resolution: vec2) -> vec2 { return (val + 0.5f / resolution) * (resolution / (resolution + 1.0f)); @@ -118,6 +118,27 @@ fn sample_transmittance_lut(r: f32, mu: f32) -> vec3 { return textureSampleLevel(transmittance_lut, transmittance_lut_sampler, uv, 0.0).rgb; } +// NOTICE: This function is copyrighted by Eric Bruneton and INRIA, and falls +// under the license reproduced in bruneton_functions.wgsl (variant of MIT license) +// +// FIXME: this function should be in bruneton_functions.wgsl, but because naga_oil doesn't +// support cyclic imports it's stuck here +fn sample_transmittance_lut_segment(r: f32, mu: f32, t: f32) -> vec3 { + let r_t = get_local_r(r, mu, t); + let mu_t = clamp((r * mu + t) / r_t, -1.0, 1.0); + + if ray_intersects_ground(r, mu) { + return min( + sample_transmittance_lut(r_t, -mu_t) / sample_transmittance_lut(r, -mu), + vec3(1.0) + ); + } else { + return min( + sample_transmittance_lut(r, mu) / sample_transmittance_lut(r_t, mu_t), vec3(1.0) + ); + } +} + fn sample_multiscattering_lut(r: f32, mu: f32) -> vec3 { let uv = multiscattering_lut_r_mu_to_uv(r, mu); return textureSampleLevel(multiscattering_lut, multiscattering_lut_sampler, uv, 0.0).rgb; @@ -130,23 +151,31 @@ fn sample_sky_view_lut(r: f32, ray_dir_as: vec3) -> vec3 { return textureSampleLevel(sky_view_lut, sky_view_lut_sampler, uv, 0.0).rgb; } +fn ndc_to_camera_dist(ndc: vec3) -> f32 { + let view_pos = view.view_from_clip * vec4(ndc, 1.0); + let t = length(view_pos.xyz / view_pos.w) * settings.scene_units_to_m; + return t; +} + // RGB channels: total inscattered light along the camera ray to the current sample. // A channel: average transmittance across all wavelengths to the current sample. -fn sample_aerial_view_lut(uv: vec2, depth: f32) -> vec4 { - let view_pos = view.view_from_clip * vec4(uv_to_ndc(uv), depth, 1.0); - let dist = length(view_pos.xyz / view_pos.w) * settings.scene_units_to_m; +fn sample_aerial_view_lut(uv: vec2, t: f32) -> vec3 { let t_max = settings.aerial_view_lut_max_distance; let num_slices = f32(settings.aerial_view_lut_size.z); - // Offset the W coordinate by -0.5 over the max distance in order to - // align sampling position with slice boundaries, since each texel - // stores the integral over its entire slice - let uvw = vec3(uv, saturate(dist / t_max - 0.5 / num_slices)); + // Each texel stores the value of the scattering integral over the whole slice, + // which requires us to offset the w coordinate by half a slice. For + // example, if we wanted the value of the integral at the boundary between slices, + // we'd need to sample at the center of the previous slice, and vice-versa for + // sampling in the center of a slice. 
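+    // As a worked illustration (assuming, say, a 32-slice LUT): a point at the far
+    // edge of the first slice has t / t_max = 1/32, so the half-slice offset below
+    // moves the sample to w = 1/32 - 0.5/32 = 1/64, which is exactly the center of
+    // the first texel, i.e. the integral over that whole slice.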
+ let uvw = vec3(uv, saturate(t / t_max - 0.5 / num_slices)); let sample = textureSampleLevel(aerial_view_lut, aerial_view_lut_sampler, uvw, 0.0); - // Treat the first slice specially since there is 0 scattering at the camera - let delta_slice = t_max / num_slices; - let fade = saturate(dist / delta_slice); + // Since sampling anywhere between w=0 and w=t_slice will clamp to the first slice, + // we need to do a linear step over the first slice towards zero at the camera's + // position to recover the correct integral value. + let t_slice = t_max / num_slices; + let fade = saturate(t / t_slice); // Recover the values from log space - return exp(sample) * fade; + return exp(sample.rgb) * fade; } // PHASE FUNCTIONS @@ -248,11 +277,11 @@ fn sample_local_inscattering(local_atmosphere: AtmosphereSample, ray_dir: vec3, transmittance: vec3) -> vec3 { +fn sample_sun_radiance(ray_dir_ws: vec3) -> vec3 { let r = view_radius(); let mu_view = ray_dir_ws.y; let shadow_factor = f32(!ray_intersects_ground(r, mu_view)); - var sun_illuminance = vec3(0.0); + var sun_radiance = vec3(0.0); for (var light_i: u32 = 0u; light_i < lights.n_directional_lights; light_i++) { let light = &lights.directional_lights[light_i]; let neg_LdotV = dot((*light).direction_to_light, ray_dir_ws); @@ -260,9 +289,9 @@ fn sample_sun_illuminance(ray_dir_ws: vec3, transmittance: vec3) -> ve let pixel_size = fwidth(angle_to_sun); let factor = smoothstep(0.0, -pixel_size * ROOT_2, angle_to_sun - SUN_ANGULAR_SIZE * 0.5); let sun_solid_angle = (SUN_ANGULAR_SIZE * SUN_ANGULAR_SIZE) * 4.0 * FRAC_PI; - sun_illuminance += ((*light).color.rgb / sun_solid_angle) * factor * shadow_factor; + sun_radiance += ((*light).color.rgb / sun_solid_angle) * factor * shadow_factor; } - return sun_illuminance * transmittance * view.exposure; + return sun_radiance; } // TRANSFORM UTILITIES diff --git a/crates/bevy_pbr/src/atmosphere/mod.rs b/crates/bevy_pbr/src/atmosphere/mod.rs index 2a2a1d8177..e7f17f0e1e 100644 --- a/crates/bevy_pbr/src/atmosphere/mod.rs +++ b/crates/bevy_pbr/src/atmosphere/mod.rs @@ -25,6 +25,10 @@ //! at once is untested, and might not be physically accurate. These may be //! integrated into a single module in the future. //! +//! On web platforms, atmosphere rendering will look slightly different. Specifically, when calculating how light travels +//! through the atmosphere, we use a simpler averaging technique instead of the more +//! complex blending operations. This difference will be resolved for WebGPU in a future release. +//! //! [Shadertoy]: https://www.shadertoy.com/view/slSXRW //! //! [Unreal Engine Implementation]: https://github.com/sebh/UnrealEngineSkyAtmosphere @@ -36,13 +40,13 @@ use bevy_app::{App, Plugin}; use bevy_asset::load_internal_asset; use bevy_core_pipeline::core_3d::graph::Node3d; use bevy_ecs::{ - component::{require, Component}, + component::Component, query::{Changed, QueryItem, With}, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{lifetimeless::Read, Query}, }; use bevy_math::{UVec2, UVec3, Vec3}; -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ extract_component::UniformComponentPlugin, render_resource::{DownlevelFlags, ShaderType, SpecializedRenderPipelines}, @@ -243,6 +247,7 @@ impl Plugin for AtmospherePlugin { /// high altitude. 
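// A minimal usage sketch based on the docs above; the `Atmosphere::EARTH` preset used
// here is an assumption for illustration, not something introduced by this change:
//
//     commands.spawn((
//         Camera3d::default(),
//         // Earth-like preset; any other `Atmosphere` value or a customized
//         // `AtmosphereSettings` can be substituted.
//         Atmosphere::EARTH,
//         AtmosphereSettings::default(),
//     ));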
#[derive(Clone, Component, Reflect, ShaderType)] #[require(AtmosphereSettings)] +#[reflect(Clone, Default)] pub struct Atmosphere { /// Radius of the planet /// @@ -381,6 +386,7 @@ impl ExtractComponent for Atmosphere { /// scattered towards the camera at each point (RGB channels), alongside the average /// transmittance to that point (A channel). #[derive(Clone, Component, Reflect, ShaderType)] +#[reflect(Clone, Default)] pub struct AtmosphereSettings { /// The size of the transmittance LUT pub transmittance_lut_size: UVec2, diff --git a/crates/bevy_pbr/src/atmosphere/render_sky.wgsl b/crates/bevy_pbr/src/atmosphere/render_sky.wgsl index 97a0c47b51..e488656df4 100644 --- a/crates/bevy_pbr/src/atmosphere/render_sky.wgsl +++ b/crates/bevy_pbr/src/atmosphere/render_sky.wgsl @@ -2,10 +2,10 @@ types::{Atmosphere, AtmosphereSettings}, bindings::{atmosphere, view, atmosphere_transforms}, functions::{ - sample_transmittance_lut, sample_sky_view_lut, - direction_world_to_atmosphere, uv_to_ray_direction, - uv_to_ndc, sample_aerial_view_lut, view_radius, - sample_sun_illuminance, + sample_transmittance_lut, sample_transmittance_lut_segment, + sample_sky_view_lut, direction_world_to_atmosphere, + uv_to_ray_direction, uv_to_ndc, sample_aerial_view_lut, + view_radius, sample_sun_radiance, ndc_to_camera_dist }, }; #import bevy_render::view::View; @@ -18,22 +18,41 @@ @group(0) @binding(13) var depth_texture: texture_depth_2d; #endif -@fragment -fn main(in: FullscreenVertexOutput) -> @location(0) vec4 { - let depth = textureLoad(depth_texture, vec2(in.position.xy), 0); - if depth == 0.0 { - let ray_dir_ws = uv_to_ray_direction(in.uv); - let ray_dir_as = direction_world_to_atmosphere(ray_dir_ws.xyz); - - let r = view_radius(); - let mu = ray_dir_ws.y; - - let transmittance = sample_transmittance_lut(r, mu); - let inscattering = sample_sky_view_lut(r, ray_dir_as); - - let sun_illuminance = sample_sun_illuminance(ray_dir_ws.xyz, transmittance); - return vec4(inscattering + sun_illuminance, (transmittance.r + transmittance.g + transmittance.b) / 3.0); - } else { - return sample_aerial_view_lut(in.uv, depth); - } +struct RenderSkyOutput { + @location(0) inscattering: vec4, +#ifdef DUAL_SOURCE_BLENDING + @location(0) @second_blend_source transmittance: vec4, +#endif +} + +@fragment +fn main(in: FullscreenVertexOutput) -> RenderSkyOutput { + let depth = textureLoad(depth_texture, vec2(in.position.xy), 0); + + let ray_dir_ws = uv_to_ray_direction(in.uv); + let r = view_radius(); + let mu = ray_dir_ws.y; + + var transmittance: vec3; + var inscattering: vec3; + + let sun_radiance = sample_sun_radiance(ray_dir_ws.xyz); + + if depth == 0.0 { + let ray_dir_as = direction_world_to_atmosphere(ray_dir_ws.xyz); + transmittance = sample_transmittance_lut(r, mu); + inscattering += sample_sky_view_lut(r, ray_dir_as); + inscattering += sun_radiance * transmittance * view.exposure; + } else { + let t = ndc_to_camera_dist(vec3(uv_to_ndc(in.uv), depth)); + inscattering = sample_aerial_view_lut(in.uv, t); + transmittance = sample_transmittance_lut_segment(r, mu, t); + } +#ifdef DUAL_SOURCE_BLENDING + return RenderSkyOutput(vec4(inscattering, 0.0), vec4(transmittance, 1.0)); +#else + let mean_transmittance = (transmittance.r + transmittance.g + transmittance.b) / 3.0; + return RenderSkyOutput(vec4(inscattering, mean_transmittance)); +#endif + } diff --git a/crates/bevy_pbr/src/atmosphere/resources.rs b/crates/bevy_pbr/src/atmosphere/resources.rs index d37532e225..b872916619 100644 --- a/crates/bevy_pbr/src/atmosphere/resources.rs 
+++ b/crates/bevy_pbr/src/atmosphere/resources.rs @@ -326,6 +326,7 @@ pub(crate) struct RenderSkyPipelineId(pub CachedRenderPipelineId); pub(crate) struct RenderSkyPipelineKey { pub msaa_samples: u32, pub hdr: bool, + pub dual_source_blending: bool, } impl SpecializedRenderPipeline for RenderSkyBindGroupLayouts { @@ -340,6 +341,15 @@ impl SpecializedRenderPipeline for RenderSkyBindGroupLayouts { if key.hdr { shader_defs.push("TONEMAP_IN_SHADER".into()); } + if key.dual_source_blending { + shader_defs.push("DUAL_SOURCE_BLENDING".into()); + } + + let dst_factor = if key.dual_source_blending { + BlendFactor::Src1 + } else { + BlendFactor::SrcAlpha + }; RenderPipelineDescriptor { label: Some(format!("render_sky_pipeline_{}", key.msaa_samples).into()), @@ -367,7 +377,7 @@ impl SpecializedRenderPipeline for RenderSkyBindGroupLayouts { blend: Some(BlendState { color: BlendComponent { src_factor: BlendFactor::One, - dst_factor: BlendFactor::SrcAlpha, + dst_factor, operation: BlendOperation::Add, }, alpha: BlendComponent { @@ -388,6 +398,7 @@ pub(super) fn queue_render_sky_pipelines( pipeline_cache: Res, layouts: Res, mut specializer: ResMut>, + render_device: Res, mut commands: Commands, ) { for (entity, camera, msaa) in &views { @@ -397,6 +408,9 @@ pub(super) fn queue_render_sky_pipelines( RenderSkyPipelineKey { msaa_samples: msaa.samples(), hdr: camera.hdr, + dual_source_blending: render_device + .features() + .contains(WgpuFeatures::DUAL_SOURCE_BLENDING), }, ); commands.entity(entity).insert(RenderSkyPipelineId(id)); diff --git a/crates/bevy_pbr/src/cluster/assign.rs b/crates/bevy_pbr/src/cluster/assign.rs index 36a4aadfb3..1b7b3563d7 100644 --- a/crates/bevy_pbr/src/cluster/assign.rs +++ b/crates/bevy_pbr/src/cluster/assign.rs @@ -496,7 +496,7 @@ pub(crate) fn assign_objects_to_clusters( // initialize empty cluster bounding spheres cluster_aabb_spheres.clear(); - cluster_aabb_spheres.extend(core::iter::repeat(None).take(cluster_count)); + cluster_aabb_spheres.extend(core::iter::repeat_n(None, cluster_count)); // Calculate the x/y/z cluster frustum planes in view space let mut x_planes = Vec::with_capacity(clusters.dimensions.x as usize + 1); @@ -845,7 +845,7 @@ pub(crate) fn assign_objects_to_clusters( } } - ClusterableObjectType::Decal { .. } => { + ClusterableObjectType::Decal => { // Decals currently affect all clusters in their // bounding sphere. 
// diff --git a/crates/bevy_pbr/src/cluster/mod.rs b/crates/bevy_pbr/src/cluster/mod.rs index d8a1466320..3113333be3 100644 --- a/crates/bevy_pbr/src/cluster/mod.rs +++ b/crates/bevy_pbr/src/cluster/mod.rs @@ -5,7 +5,7 @@ use core::num::NonZero; use bevy_core_pipeline::core_3d::Camera3d; use bevy_ecs::{ component::Component, - entity::{hash_map::EntityHashMap, Entity}, + entity::{Entity, EntityHashMap}, query::{With, Without}, reflect::ReflectComponent, resource::Resource, @@ -13,7 +13,7 @@ use bevy_ecs::{ world::{FromWorld, World}, }; use bevy_math::{uvec4, AspectRatio, UVec2, UVec3, UVec4, Vec3Swizzles as _, Vec4}; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ camera::Camera, @@ -66,6 +66,7 @@ const CLUSTER_COUNT_MASK: u32 = (1 << CLUSTER_COUNT_SIZE) - 1; /// Configure the far z-plane mode used for the furthest depth slice for clustered forward /// rendering #[derive(Debug, Copy, Clone, Reflect)] +#[reflect(Clone)] pub enum ClusterFarZMode { /// Calculate the required maximum z-depth based on currently visible /// clusterable objects. Makes better use of available clusters, speeding @@ -78,7 +79,7 @@ pub enum ClusterFarZMode { /// Configure the depth-slicing strategy for clustered forward rendering #[derive(Debug, Copy, Clone, Reflect)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct ClusterZConfig { /// Far `Z` plane of the first depth slice pub first_slice_depth: f32, @@ -88,7 +89,7 @@ pub struct ClusterZConfig { /// Configuration of the clustering strategy for clustered forward rendering #[derive(Debug, Copy, Clone, Component, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub enum ClusterConfig { /// Disable cluster calculations for this view None, diff --git a/crates/bevy_pbr/src/components.rs b/crates/bevy_pbr/src/components.rs index d5418910eb..fca31b3b03 100644 --- a/crates/bevy_pbr/src/components.rs +++ b/crates/bevy_pbr/src/components.rs @@ -1,6 +1,6 @@ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::component::Component; -use bevy_ecs::entity::{hash_map::EntityHashMap, Entity}; +use bevy_ecs::entity::{Entity, EntityHashMap}; use bevy_ecs::reflect::ReflectComponent; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::sync_world::MainEntity; @@ -9,23 +9,23 @@ use bevy_render::sync_world::MainEntity; /// This component contains all mesh entities visible from the current light view. /// The collection is updated automatically by [`crate::SimulationLightSystems`]. 
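// Note on the reflection attributes added below: registering `Clone` via `#[reflect(...)]`
// presumably lets reflection-driven cloning go through the type's own `Clone` impl, and
// `#[reflect(ignore, clone)]` appears to do the same for fields that are otherwise hidden
// from reflection, so the entity lists survive a reflected clone instead of being reset
// to their defaults.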
#[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct VisibleMeshEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: Vec, } #[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct RenderVisibleMeshEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: Vec<(Entity, MainEntity)>, } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct CubemapVisibleEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] data: [VisibleMeshEntities; 6], } @@ -48,9 +48,9 @@ impl CubemapVisibleEntities { } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct RenderCubemapVisibleEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub(crate) data: [RenderVisibleMeshEntities; 6], } @@ -73,17 +73,17 @@ impl RenderCubemapVisibleEntities { } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component)] +#[reflect(Component, Default, Clone)] pub struct CascadesVisibleEntities { /// Map of view entity to the visible entities for each cascade frustum. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: EntityHashMap>, } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component)] +#[reflect(Component, Default, Clone)] pub struct RenderCascadesVisibleEntities { /// Map of view entity to the visible entities for each cascade frustum. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: EntityHashMap>, } diff --git a/crates/bevy_pbr/src/decal/clustered.rs b/crates/bevy_pbr/src/decal/clustered.rs index 43edcd0bc3..5272bce80c 100644 --- a/crates/bevy_pbr/src/decal/clustered.rs +++ b/crates/bevy_pbr/src/decal/clustered.rs @@ -20,17 +20,17 @@ use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, weak_handle, AssetId, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - component::{require, Component}, - entity::{hash_map::EntityHashMap, Entity}, + component::Component, + entity::{Entity, EntityHashMap}, prelude::ReflectComponent, query::With, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{Query, Res, ResMut}, }; use bevy_image::Image; use bevy_math::Mat4; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::Reflect; use bevy_render::{ extract_component::{ExtractComponent, ExtractComponentPlugin}, @@ -80,7 +80,7 @@ pub struct ClusteredDecalPlugin; /// used on WebGL 2, WebGPU, macOS, or iOS. Bevy's clustered decals can be used /// with forward or deferred rendering and don't require a prepass. 
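// A rough usage sketch for the component below; the `image` and `tag` field names and
// the texture path are illustrative assumptions rather than part of this change:
//
//     commands.spawn((
//         ClusteredDecal {
//             image: asset_server.load("textures/decal.png"),
//             tag: 1,
//         },
//         // The transform scales and orients the box that the decal is projected into.
//         Transform::from_xyz(0.0, 2.0, 0.0).looking_at(Vec3::ZERO, Vec3::Y),
//     ));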
#[derive(Component, Debug, Clone, Reflect, ExtractComponent)] -#[reflect(Component, Debug)] +#[reflect(Component, Debug, Clone)] #[require(Transform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct ClusteredDecal { diff --git a/crates/bevy_pbr/src/decal/forward.rs b/crates/bevy_pbr/src/decal/forward.rs index 1229a688a9..2445c3e723 100644 --- a/crates/bevy_pbr/src/decal/forward.rs +++ b/crates/bevy_pbr/src/decal/forward.rs @@ -4,9 +4,12 @@ use crate::{ }; use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, weak_handle, Asset, Assets, Handle}; -use bevy_ecs::component::{require, Component}; +use bevy_ecs::component::Component; use bevy_math::{prelude::Rectangle, Quat, Vec2, Vec3}; use bevy_reflect::{Reflect, TypePath}; +use bevy_render::render_asset::RenderAssets; +use bevy_render::render_resource::{AsBindGroupShaderType, ShaderType}; +use bevy_render::texture::GpuImage; use bevy_render::{ alpha::AlphaMode, mesh::{Mesh, Mesh3d, MeshBuilder, MeshVertexBufferLayoutRef, Meshable}, @@ -14,6 +17,7 @@ use bevy_render::{ AsBindGroup, CompareFunction, RenderPipelineDescriptor, Shader, SpecializedMeshPipelineError, }, + RenderDebugFlags, }; const FORWARD_DECAL_MESH_HANDLE: Handle = @@ -48,6 +52,7 @@ impl Plugin for ForwardDecalPlugin { app.add_plugins(MaterialPlugin::> { prepass_enabled: false, shadows_enabled: false, + debug_flags: RenderDebugFlags::default(), ..Default::default() }); } @@ -61,11 +66,11 @@ impl Plugin for ForwardDecalPlugin { /// # Usage Notes /// /// * Spawn this component on an entity with a [`crate::MeshMaterial3d`] component holding a [`ForwardDecalMaterial`]. -/// * Any camera rendering a forward decal must have the [`bevy_core_pipeline::DepthPrepass`] component. +/// * Any camera rendering a forward decal must have the [`bevy_core_pipeline::prepass::DepthPrepass`] component. /// * Looking at forward decals at a steep angle can cause distortion. This can be mitigated by padding your decal's /// texture with extra transparent pixels on the edges. #[derive(Component, Reflect)] -#[require(Mesh3d(|| Mesh3d(FORWARD_DECAL_MESH_HANDLE)))] +#[require(Mesh3d(FORWARD_DECAL_MESH_HANDLE))] pub struct ForwardDecal; /// Type alias for an extended material with a [`ForwardDecalMaterialExt`] extension. 
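// A minimal sketch of the usage notes above, assuming the usual `bevy` prelude plus the
// decal types from this module are in scope; the texture path, `depth_fade_factor` value,
// and camera setup are illustrative assumptions, not part of this change.
fn spawn_forward_decal(
    mut commands: Commands,
    asset_server: Res<AssetServer>,
    mut decal_materials: ResMut<Assets<ForwardDecalMaterial<StandardMaterial>>>,
) {
    commands.spawn((
        ForwardDecal,
        MeshMaterial3d(decal_materials.add(ForwardDecalMaterial {
            base: StandardMaterial {
                base_color_texture: Some(asset_server.load("textures/decal.png")),
                alpha_mode: AlphaMode::Blend,
                ..Default::default()
            },
            extension: ForwardDecalMaterialExt { depth_fade_factor: 1.0 },
        })),
    ));

    // Forward decals reconstruct scene depth, so the rendering camera must opt in to
    // the depth prepass.
    commands.spawn((Camera3d::default(), DepthPrepass));
}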
@@ -84,16 +89,36 @@ pub type ForwardDecalMaterial = ExtendedMaterial for ForwardDecalMaterialExt { + fn as_bind_group_shader_type( + &self, + _images: &RenderAssets, + ) -> ForwardDecalMaterialExtUniform { + ForwardDecalMaterialExtUniform { + inv_depth_fade_factor: 1.0 / self.depth_fade_factor.max(0.001), + } + } +} + impl MaterialExtension for ForwardDecalMaterialExt { fn alpha_mode() -> Option { Some(AlphaMode::Blend) diff --git a/crates/bevy_pbr/src/decal/forward_decal.wgsl b/crates/bevy_pbr/src/decal/forward_decal.wgsl index dbc6bbc1c4..ce24d57bf5 100644 --- a/crates/bevy_pbr/src/decal/forward_decal.wgsl +++ b/crates/bevy_pbr/src/decal/forward_decal.wgsl @@ -11,7 +11,7 @@ #import bevy_render::maths::project_onto @group(2) @binding(200) -var depth_fade_factor: f32; +var inv_depth_fade_factor: f32; struct ForwardDecalInformation { world_position: vec4, @@ -46,7 +46,7 @@ fn get_forward_decal_info(in: VertexOutput) -> ForwardDecalInformation { let uv = in.uv + delta_uv; let world_position = vec4(in.world_position.xyz + V * diff_depth_abs, in.world_position.w); - let alpha = saturate(1.0 - normal_depth * depth_fade_factor); + let alpha = saturate(1.0 - (normal_depth * inv_depth_fade_factor)); return ForwardDecalInformation(world_position, uv, alpha); } diff --git a/crates/bevy_pbr/src/deferred/mod.rs b/crates/bevy_pbr/src/deferred/mod.rs index 5738cdd25d..e40b3a940a 100644 --- a/crates/bevy_pbr/src/deferred/mod.rs +++ b/crates/bevy_pbr/src/deferred/mod.rs @@ -345,6 +345,10 @@ impl SpecializedRenderPipeline for DeferredLightingLayout { } else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL { shader_defs.push("SHADOW_FILTER_METHOD_TEMPORAL".into()); } + if self.mesh_pipeline.binding_arrays_are_usable { + shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into()); + shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into()); + } #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into()); @@ -432,28 +436,26 @@ pub fn prepare_deferred_lighting_pipelines( pipeline_cache: Res, mut pipelines: ResMut>, deferred_lighting_layout: Res, - views: Query< + views: Query<( + Entity, + &ExtractedView, + Option<&Tonemapping>, + Option<&DebandDither>, + Option<&ShadowFilteringMethod>, ( - Entity, - &ExtractedView, - Option<&Tonemapping>, - Option<&DebandDither>, - Option<&ShadowFilteringMethod>, - ( - Has, - Has, - Has, - ), - ( - Has, - Has, - Has, - ), - Has>, - Has>, + Has, + Has, + Has, ), - With, - >, + ( + Has, + Has, + Has, + Has, + ), + Has>, + Has>, + )>, ) { for ( entity, @@ -462,11 +464,19 @@ pub fn prepare_deferred_lighting_pipelines( dither, shadow_filter_method, (ssao, ssr, distance_fog), - (normal_prepass, depth_prepass, motion_vector_prepass), + (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), has_environment_maps, has_irradiance_volumes, ) in &views { + // If there is no deferred prepass, remove the old pipeline if there was + // one. This handles the case in which a view using deferred stops using + // it. 
+ if !deferred_prepass { + commands.entity(entity).remove::(); + continue; + } + let mut view_key = MeshPipelineKey::from_hdr(view.hdr); if normal_prepass { diff --git a/crates/bevy_pbr/src/extended_material.rs b/crates/bevy_pbr/src/extended_material.rs index 17ea201561..e01dd0ff14 100644 --- a/crates/bevy_pbr/src/extended_material.rs +++ b/crates/bevy_pbr/src/extended_material.rs @@ -1,11 +1,15 @@ +use alloc::borrow::Cow; + use bevy_asset::{Asset, Handle}; use bevy_ecs::system::SystemParamItem; +use bevy_platform::{collections::HashSet, hash::FixedHasher}; use bevy_reflect::{impl_type_path, Reflect}; use bevy_render::{ alpha::AlphaMode, mesh::MeshVertexBufferLayoutRef, render_resource::{ - AsBindGroup, AsBindGroupError, BindGroupLayout, RenderPipelineDescriptor, Shader, + AsBindGroup, AsBindGroupError, BindGroupLayout, BindGroupLayoutEntry, BindlessDescriptor, + BindlessResourceType, BindlessSlabResourceLimit, RenderPipelineDescriptor, Shader, ShaderRef, SpecializedMeshPipelineError, UnpreparedBindGroup, }, renderer::RenderDevice, @@ -127,6 +131,7 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// the `extended_material` example). #[derive(Asset, Clone, Debug, Reflect)] #[reflect(type_path = false)] +#[reflect(Clone)] pub struct ExtendedMaterial { pub base: B, pub extension: E, @@ -153,12 +158,24 @@ impl AsBindGroup for ExtendedMaterial { type Data = (::Data, ::Data); type Param = (::Param, ::Param); - fn bindless_slot_count() -> Option { - match (B::bindless_slot_count(), E::bindless_slot_count()) { - (Some(base_bindless_slot_count), Some(extension_bindless_slot_count)) => { - Some(base_bindless_slot_count.min(extension_bindless_slot_count)) + fn bindless_slot_count() -> Option { + // We only enable bindless if both the base material and its extension + // are bindless. If we do enable bindless, we choose the smaller of the + // two slab size limits. + match (B::bindless_slot_count()?, E::bindless_slot_count()?) { + (BindlessSlabResourceLimit::Auto, BindlessSlabResourceLimit::Auto) => { + Some(BindlessSlabResourceLimit::Auto) } - _ => None, + (BindlessSlabResourceLimit::Auto, BindlessSlabResourceLimit::Custom(limit)) + | (BindlessSlabResourceLimit::Custom(limit), BindlessSlabResourceLimit::Auto) => { + Some(BindlessSlabResourceLimit::Custom(limit)) + } + ( + BindlessSlabResourceLimit::Custom(base_limit), + BindlessSlabResourceLimit::Custom(extended_limit), + ) => Some(BindlessSlabResourceLimit::Custom( + base_limit.min(extended_limit), + )), } } @@ -167,11 +184,9 @@ impl AsBindGroup for ExtendedMaterial { layout: &BindGroupLayout, render_device: &RenderDevice, (base_param, extended_param): &mut SystemParamItem<'_, '_, Self::Param>, - mut force_no_bindless: bool, + mut force_non_bindless: bool, ) -> Result, AsBindGroupError> { - // Only allow bindless mode if both the base material and the extension - // support it. 
- force_no_bindless = force_no_bindless || Self::bindless_slot_count().is_none(); + force_non_bindless = force_non_bindless || Self::bindless_slot_count().is_none(); // add together the bindings of the base material and the user material let UnpreparedBindGroup { @@ -182,14 +197,14 @@ impl AsBindGroup for ExtendedMaterial { layout, render_device, base_param, - force_no_bindless, + force_non_bindless, )?; let extended_bindgroup = E::unprepared_bind_group( &self.extension, layout, render_device, extended_param, - force_no_bindless, + force_non_bindless, )?; bindings.extend(extended_bindgroup.bindings.0); @@ -202,23 +217,73 @@ impl AsBindGroup for ExtendedMaterial { fn bind_group_layout_entries( render_device: &RenderDevice, - mut force_no_bindless: bool, - ) -> Vec + mut force_non_bindless: bool, + ) -> Vec where Self: Sized, { - // Only allow bindless mode if both the base material and the extension - // support it. - force_no_bindless = force_no_bindless || Self::bindless_slot_count().is_none(); + force_non_bindless = force_non_bindless || Self::bindless_slot_count().is_none(); - // add together the bindings of the standard material and the user material - let mut entries = B::bind_group_layout_entries(render_device, force_no_bindless); - entries.extend(E::bind_group_layout_entries( - render_device, - force_no_bindless, - )); + // Add together the bindings of the standard material and the user + // material, skipping duplicate bindings. Duplicate bindings will occur + // when bindless mode is on, because of the common bindless resource + // arrays, and we need to eliminate the duplicates or `wgpu` will + // complain. + let mut entries = vec![]; + let mut seen_bindings = HashSet::<_>::with_hasher(FixedHasher); + for entry in B::bind_group_layout_entries(render_device, force_non_bindless) + .into_iter() + .chain(E::bind_group_layout_entries(render_device, force_non_bindless).into_iter()) + { + if seen_bindings.insert(entry.binding) { + entries.push(entry); + } + } entries } + + fn bindless_descriptor() -> Option { + // We're going to combine the two bindless descriptors. + let base_bindless_descriptor = B::bindless_descriptor()?; + let extended_bindless_descriptor = E::bindless_descriptor()?; + + // Combining the buffers and index tables is straightforward. + + let mut buffers = base_bindless_descriptor.buffers.to_vec(); + let mut index_tables = base_bindless_descriptor.index_tables.to_vec(); + + buffers.extend(extended_bindless_descriptor.buffers.iter().cloned()); + index_tables.extend(extended_bindless_descriptor.index_tables.iter().cloned()); + + // Combining the resources is a little trickier because the resource + // array is indexed by bindless index, so we have to merge the two + // arrays, not just concatenate them. + let max_bindless_index = base_bindless_descriptor + .resources + .len() + .max(extended_bindless_descriptor.resources.len()); + let mut resources = Vec::with_capacity(max_bindless_index); + for bindless_index in 0..max_bindless_index { + // In the event of a conflicting bindless index, we choose the + // base's binding. 
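+        // For illustration: if the base's resource array is [A, None, C] and the
+        // extension's is [None, B], the merged array comes out as [A, B, C]; the base
+        // only wins at indices where it actually declares a resource.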
+ match base_bindless_descriptor.resources.get(bindless_index) { + None | Some(&BindlessResourceType::None) => resources.push( + extended_bindless_descriptor + .resources + .get(bindless_index) + .copied() + .unwrap_or(BindlessResourceType::None), + ), + Some(&resource_type) => resources.push(resource_type), + } + } + + Some(BindlessDescriptor { + resources: Cow::Owned(resources), + buffers: Cow::Owned(buffers), + index_tables: Cow::Owned(index_tables), + }) + } } impl Material for ExtendedMaterial { diff --git a/crates/bevy_pbr/src/fog.rs b/crates/bevy_pbr/src/fog.rs index 831ec6928c..21a89ccc70 100644 --- a/crates/bevy_pbr/src/fog.rs +++ b/crates/bevy_pbr/src/fog.rs @@ -47,7 +47,7 @@ use bevy_render::{extract_component::ExtractComponent, prelude::Camera}; /// [`StandardMaterial`](crate::StandardMaterial) instances via the `fog_enabled` flag. #[derive(Debug, Clone, Component, Reflect, ExtractComponent)] #[extract_component_filter(With)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct DistanceFog { /// The color of the fog effect. /// @@ -94,6 +94,7 @@ pub struct DistanceFog { /// - [`FogFalloff::from_visibility_contrast_color()`] /// - [`FogFalloff::from_visibility_contrast_colors()`] #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub enum FogFalloff { /// A linear fog falloff that grows in intensity between `start` and `end` distances. /// @@ -141,11 +142,11 @@ pub enum FogFalloff { /// ## Tips /// /// - Use the [`FogFalloff::from_visibility()`] convenience method to create an exponential falloff with the proper - /// density for a desired visibility distance in world units; + /// density for a desired visibility distance in world units; /// - It's not _unusual_ to have very large or very small values for the density, depending on the scene - /// scale. Typically, for scenes with objects in the scale of thousands of units, you might want density values - /// in the ballpark of `0.001`. Conversely, for really small scale scenes you might want really high values of - /// density; + /// scale. Typically, for scenes with objects in the scale of thousands of units, you might want density values + /// in the ballpark of `0.001`. Conversely, for really small scale scenes you might want really high values of + /// density; /// - Combine the `density` parameter with the [`DistanceFog`] `color`'s alpha channel for easier artistic control. /// /// ## Formula @@ -193,7 +194,7 @@ pub enum FogFalloff { /// ## Tips /// /// - Use the [`FogFalloff::from_visibility_squared()`] convenience method to create an exponential squared falloff - /// with the proper density for a desired visibility distance in world units; + /// with the proper density for a desired visibility distance in world units; /// - Combine the `density` parameter with the [`DistanceFog`] `color`'s alpha channel for easier artistic control. /// /// ## Formula @@ -239,8 +240,8 @@ pub enum FogFalloff { /// ## Tips /// /// - Use the [`FogFalloff::from_visibility_colors()`] or [`FogFalloff::from_visibility_color()`] convenience methods - /// to create an atmospheric falloff with the proper densities for a desired visibility distance in world units and - /// extinction and inscattering colors; + /// to create an atmospheric falloff with the proper densities for a desired visibility distance in world units and + /// extinction and inscattering colors; /// - Combine the atmospheric fog parameters with the [`DistanceFog`] `color`'s alpha channel for easier artistic control. 
/// /// ## Formula diff --git a/crates/bevy_pbr/src/lib.rs b/crates/bevy_pbr/src/lib.rs index 88403900ae..1810bc67eb 100644 --- a/crates/bevy_pbr/src/lib.rs +++ b/crates/bevy_pbr/src/lib.rs @@ -45,8 +45,6 @@ mod ssao; mod ssr; mod volumetric_fog; -use crate::material_bind_groups::FallbackBindlessResources; - use bevy_color::{Color, LinearRgba}; pub use atmosphere::*; @@ -59,6 +57,7 @@ pub use light::*; pub use light_probe::*; pub use lightmap::*; pub use material::*; +pub use material_bind_groups::*; pub use mesh_material::*; pub use parallax::*; pub use pbr_material::*; @@ -88,23 +87,38 @@ pub mod prelude { pub mod graph { use bevy_render::render_graph::RenderLabel; + /// Render graph nodes specific to 3D PBR rendering. #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)] pub enum NodePbr { - /// Label for the shadow pass node. - ShadowPass, + /// Label for the shadow pass node that draws meshes that were visible + /// from the light last frame. + EarlyShadowPass, + /// Label for the shadow pass node that draws meshes that became visible + /// from the light this frame. + LateShadowPass, /// Label for the screen space ambient occlusion render node. ScreenSpaceAmbientOcclusion, DeferredLightingPass, /// Label for the volumetric lighting pass. VolumetricFog, - /// Label for the compute shader instance data building pass. + /// Label for the shader that transforms and culls meshes that were + /// visible last frame. EarlyGpuPreprocess, + /// Label for the shader that transforms and culls meshes that became + /// visible this frame. LateGpuPreprocess, /// Label for the screen space reflections pass. ScreenSpaceReflections, + /// Label for the node that builds indirect draw parameters for meshes + /// that were visible last frame. EarlyPrepassBuildIndirectParameters, + /// Label for the node that builds indirect draw parameters for meshes + /// that became visible this frame. LatePrepassBuildIndirectParameters, + /// Label for the node that builds indirect draw parameters for the main + /// rendering pass, containing all meshes that are visible this frame. MainBuildIndirectParameters, + ClearIndirectParametersMetadata, } } @@ -116,16 +130,14 @@ use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_render::{ alpha::AlphaMode, - camera::{CameraUpdateSystem, Projection}, + camera::{sort_cameras, CameraUpdateSystem, Projection}, extract_component::ExtractComponentPlugin, extract_resource::ExtractResourcePlugin, - render_asset::prepare_assets, render_graph::RenderGraph, render_resource::Shader, sync_component::SyncComponentPlugin, - texture::GpuImage, view::VisibilitySystems, - ExtractSchedule, Render, RenderApp, RenderSet, + ExtractSchedule, Render, RenderApp, RenderDebugFlags, RenderSet, }; use bevy_transform::TransformSystem; @@ -182,6 +194,8 @@ pub struct PbrPlugin { /// This requires compute shader support and so will be forcibly disabled if /// the platform doesn't support those. pub use_gpu_instance_buffer_builder: bool, + /// Debugging flags that can optionally be set when constructing the renderer. 
+ pub debug_flags: RenderDebugFlags, } impl Default for PbrPlugin { @@ -190,6 +204,7 @@ impl Default for PbrPlugin { prepass_enabled: true, add_default_deferred_lighting_plugin: true, use_gpu_instance_buffer_builder: true, + debug_flags: RenderDebugFlags::default(), } } } @@ -333,9 +348,11 @@ impl Plugin for PbrPlugin { .add_plugins(( MeshRenderPlugin { use_gpu_instance_buffer_builder: self.use_gpu_instance_buffer_builder, + debug_flags: self.debug_flags, }, MaterialPlugin:: { prepass_enabled: self.prepass_enabled, + debug_flags: self.debug_flags, ..Default::default() }, ScreenSpaceAmbientOcclusionPlugin, @@ -423,7 +440,8 @@ impl Plugin for PbrPlugin { // NOTE: This MUST be scheduled AFTER the core renderer visibility check // because that resets entity `ViewVisibility` for the first view // which would override any results from this otherwise - .after(VisibilitySystems::CheckVisibility), + .after(VisibilitySystems::CheckVisibility) + .before(VisibilitySystems::MarkNewlyHiddenEntitiesInvisible), ), ); @@ -448,13 +466,20 @@ impl Plugin for PbrPlugin { // Extract the required data from the main world render_app - .add_systems(ExtractSchedule, (extract_clusters, extract_lights)) + .add_systems( + ExtractSchedule, + ( + extract_clusters, + extract_lights, + late_sweep_material_instances, + ), + ) .add_systems( Render, ( prepare_lights .in_set(RenderSet::ManageViews) - .after(prepare_assets::), + .after(sort_cameras), prepare_clusters.in_set(RenderSet::PrepareResources), ), ) @@ -467,11 +492,17 @@ impl Plugin for PbrPlugin { .add_observer(remove_light_view_entities); render_app.world_mut().add_observer(extracted_light_removed); - let shadow_pass_node = ShadowPassNode::new(render_app.world_mut()); + let early_shadow_pass_node = EarlyShadowPassNode::from_world(render_app.world_mut()); + let late_shadow_pass_node = LateShadowPassNode::from_world(render_app.world_mut()); let mut graph = render_app.world_mut().resource_mut::(); let draw_3d_graph = graph.get_sub_graph_mut(Core3d).unwrap(); - draw_3d_graph.add_node(NodePbr::ShadowPass, shadow_pass_node); - draw_3d_graph.add_node_edge(NodePbr::ShadowPass, Node3d::StartMainPass); + draw_3d_graph.add_node(NodePbr::EarlyShadowPass, early_shadow_pass_node); + draw_3d_graph.add_node(NodePbr::LateShadowPass, late_shadow_pass_node); + draw_3d_graph.add_node_edges(( + NodePbr::EarlyShadowPass, + NodePbr::LateShadowPass, + Node3d::StartMainPass, + )); } fn finish(&self, app: &mut App) { diff --git a/crates/bevy_pbr/src/light/ambient_light.rs b/crates/bevy_pbr/src/light/ambient_light.rs index f09bab51f6..db255722b3 100644 --- a/crates/bevy_pbr/src/light/ambient_light.rs +++ b/crates/bevy_pbr/src/light/ambient_light.rs @@ -18,7 +18,7 @@ use super::*; /// } /// ``` #[derive(Resource, Component, Clone, Debug, ExtractResource, ExtractComponent, Reflect)] -#[reflect(Resource, Component, Debug, Default)] +#[reflect(Resource, Component, Debug, Default, Clone)] #[require(Camera)] pub struct AmbientLight { pub color: Color, diff --git a/crates/bevy_pbr/src/light/directional_light.rs b/crates/bevy_pbr/src/light/directional_light.rs index 1eb17ea9ab..a5798fdde7 100644 --- a/crates/bevy_pbr/src/light/directional_light.rs +++ b/crates/bevy_pbr/src/light/directional_light.rs @@ -41,16 +41,9 @@ use super::*; /// To modify the cascade setup, such as the number of cascades or the maximum shadow distance, /// change the [`CascadeShadowConfig`] component of the entity with the [`DirectionalLight`]. 
/// -/// To control the resolution of the shadow maps, use the [`DirectionalLightShadowMap`] resource: -/// -/// ``` -/// # use bevy_app::prelude::*; -/// # use bevy_pbr::DirectionalLightShadowMap; -/// App::new() -/// .insert_resource(DirectionalLightShadowMap { size: 2048 }); -/// ``` +/// To control the resolution of the shadow maps, use the [`DirectionalLightShadowMap`] resource. #[derive(Component, Debug, Clone, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require( Cascades, CascadesFrusta, diff --git a/crates/bevy_pbr/src/light/mod.rs b/crates/bevy_pbr/src/light/mod.rs index 713ac1a27d..91ea9cddd3 100644 --- a/crates/bevy_pbr/src/light/mod.rs +++ b/crates/bevy_pbr/src/light/mod.rs @@ -1,7 +1,7 @@ use core::ops::DerefMut; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, hash_set::EntityHashSet}, + entity::{EntityHashMap, EntityHashSet}, prelude::*, }; use bevy_math::{ops, Mat4, Vec3A, Vec4}; @@ -13,8 +13,8 @@ use bevy_render::{ mesh::Mesh3d, primitives::{Aabb, CascadesFrusta, CubemapFrusta, Frustum, Sphere}, view::{ - InheritedVisibility, NoFrustumCulling, RenderLayers, ViewVisibility, VisibilityClass, - VisibilityRange, VisibleEntityRanges, + InheritedVisibility, NoFrustumCulling, PreviousVisibleEntities, RenderLayers, + ViewVisibility, VisibilityClass, VisibilityRange, VisibleEntityRanges, }, }; use bevy_transform::components::{GlobalTransform, Transform}; @@ -91,9 +91,20 @@ pub mod light_consts { } } +/// Controls the resolution of [`PointLight`] shadow maps. +/// +/// ``` +/// # use bevy_app::prelude::*; +/// # use bevy_pbr::PointLightShadowMap; +/// App::new() +/// .insert_resource(PointLightShadowMap { size: 2048 }); +/// ``` #[derive(Resource, Clone, Debug, Reflect)] -#[reflect(Resource, Debug, Default)] +#[reflect(Resource, Debug, Default, Clone)] pub struct PointLightShadowMap { + /// The width and height of each of the 6 faces of the cubemap. + /// + /// Defaults to `1024`. pub size: usize, } @@ -108,9 +119,19 @@ impl Default for PointLightShadowMap { pub type WithLight = Or<(With, With, With)>; /// Controls the resolution of [`DirectionalLight`] shadow maps. +/// +/// ``` +/// # use bevy_app::prelude::*; +/// # use bevy_pbr::DirectionalLightShadowMap; +/// App::new() +/// .insert_resource(DirectionalLightShadowMap { size: 4096 }); +/// ``` #[derive(Resource, Clone, Debug, Reflect)] -#[reflect(Resource, Debug, Default)] +#[reflect(Resource, Debug, Default, Clone)] pub struct DirectionalLightShadowMap { + // The width and height of each cascade. + /// + /// Defaults to `2048`. pub size: usize, } @@ -134,7 +155,7 @@ impl Default for DirectionalLightShadowMap { /// }.into(); /// ``` #[derive(Component, Clone, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct CascadeShadowConfig { /// The (positive) distance to the far boundary of each cascade. pub bounds: Vec, @@ -276,13 +297,14 @@ impl From for CascadeShadowConfig { } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct Cascades { /// Map from a view to the configuration of each of its [`Cascade`]s. pub(crate) cascades: EntityHashMap>, } #[derive(Clone, Debug, Default, Reflect)] +#[reflect(Clone, Default)] pub struct Cascade { /// The transform of the light, i.e. the view to world matrix. 
pub(crate) world_from_cascade: Mat4, @@ -472,7 +494,7 @@ pub struct TransmittedShadowReceiver; /// The different modes use different approaches to /// [Percentage Closer Filtering](https://developer.nvidia.com/gpugems/gpugems/part-ii-lighting-and-shadows/chapter-11-shadow-map-antialiasing). #[derive(Debug, Component, ExtractComponent, Reflect, Clone, Copy, PartialEq, Eq, Default)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub enum ShadowFilteringMethod { /// Hardware 2x2. /// @@ -491,8 +513,7 @@ pub enum ShadowFilteringMethod { Gaussian, /// A randomized filter that varies over time, good when TAA is in use. /// - /// Good quality when used with - /// [`TemporalAntiAliasing`](bevy_core_pipeline::experimental::taa::TemporalAntiAliasing) + /// Good quality when used with `TemporalAntiAliasing` /// and good performance. /// /// For directional and spot lights, this uses a [method by Jorge Jimenez for @@ -563,9 +584,13 @@ pub fn update_directional_light_frusta( // NOTE: Run this after assign_lights_to_clusters! pub fn update_point_light_frusta( global_lights: Res, - mut views: Query< - (Entity, &GlobalTransform, &PointLight, &mut CubemapFrusta), - Or<(Changed, Changed)>, + mut views: Query<(Entity, &GlobalTransform, &PointLight, &mut CubemapFrusta)>, + changed_lights: Query< + Entity, + ( + With, + Or<(Changed, Changed)>, + ), >, ) { let view_rotations = CUBE_MAP_FACES @@ -574,6 +599,12 @@ pub fn update_point_light_frusta( .collect::>(); for (entity, transform, point_light, mut cubemap_frusta) in &mut views { + // If this light hasn't changed, and neither has the set of global_lights, + // then we can skip this calculation. + if !global_lights.is_changed() && !changed_lights.contains(entity) { + continue; + } + // The frusta are used for culling meshes to the light for shadow mapping // so if shadow mapping is disabled for this light, then the frusta are // not needed. @@ -814,15 +845,23 @@ pub fn check_dir_light_mesh_visibility( // TODO: use resource to avoid unnecessary memory alloc let mut defer_queue = core::mem::take(defer_visible_entities_queue.deref_mut()); commands.queue(move |world: &mut World| { - let mut query = world.query::<&mut ViewVisibility>(); - for entities in defer_queue.iter_mut() { - let mut iter = query.iter_many_mut(world, entities.iter()); - while let Some(mut view_visibility) = iter.fetch_next() { - if !**view_visibility { - view_visibility.set(); + world.resource_scope::( + |world, mut previous_visible_entities| { + let mut query = world.query::<(Entity, &mut ViewVisibility)>(); + for entities in defer_queue.iter_mut() { + let mut iter = query.iter_many_mut(world, entities.iter()); + while let Some((entity, mut view_visibility)) = iter.fetch_next() { + if !**view_visibility { + view_visibility.set(); + } + + // Remove any entities that were discovered to be + // visible from the `PreviousVisibleEntities` resource. 
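+                        // (Presumably, whatever is still left in `PreviousVisibleEntities`
+                        // once every view has been processed is what
+                        // `VisibilitySystems::MarkNewlyHiddenEntitiesInvisible` later
+                        // marks as newly hidden.)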
+ previous_visible_entities.remove(&entity); + } } - } - } + }, + ); }); } @@ -860,6 +899,7 @@ pub fn check_point_light_mesh_visibility( ), >, visible_entity_ranges: Option>, + mut previous_visible_entities: ResMut, mut cubemap_visible_entities_queue: Local; 6]>>, mut spot_visible_entities_queue: Local>>, mut checked_lights: Local, @@ -961,10 +1001,17 @@ pub fn check_point_light_mesh_visibility( ); for entities in cubemap_visible_entities_queue.iter_mut() { - cubemap_visible_entities - .iter_mut() - .zip(entities.iter_mut()) - .for_each(|(dst, source)| dst.entities.append(source)); + for (dst, source) in + cubemap_visible_entities.iter_mut().zip(entities.iter_mut()) + { + // Remove any entities that were discovered to be + // visible from the `PreviousVisibleEntities` resource. + for entity in source.iter() { + previous_visible_entities.remove(entity); + } + + dst.entities.append(source); + } } for visible_entities in cubemap_visible_entities.iter_mut() { @@ -1047,6 +1094,12 @@ pub fn check_point_light_mesh_visibility( for entities in spot_visible_entities_queue.iter_mut() { visible_entities.append(entities); + + // Remove any entities that were discovered to be visible + // from the `PreviousVisibleEntities` resource. + for entity in entities { + previous_visible_entities.remove(entity); + } } shrink_entities(visible_entities.deref_mut()); diff --git a/crates/bevy_pbr/src/light/point_light.rs b/crates/bevy_pbr/src/light/point_light.rs index 800c7b9bd0..f2e4224d28 100644 --- a/crates/bevy_pbr/src/light/point_light.rs +++ b/crates/bevy_pbr/src/light/point_light.rs @@ -19,8 +19,14 @@ use super::*; /// | 4000 | 300 | | 75-100 | 40.5 | /// /// Source: [Wikipedia](https://en.wikipedia.org/wiki/Lumen_(unit)#Lighting) +/// +/// ## Shadows +/// +/// To enable shadows, set the `shadows_enabled` property to `true`. +/// +/// To control the resolution of the shadow maps, use the [`PointLightShadowMap`] resource. #[derive(Component, Debug, Clone, Copy, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require( CubemapFrusta, CubemapVisibleEntities, diff --git a/crates/bevy_pbr/src/light/spot_light.rs b/crates/bevy_pbr/src/light/spot_light.rs index 08160a8cfa..a7cfe1b817 100644 --- a/crates/bevy_pbr/src/light/spot_light.rs +++ b/crates/bevy_pbr/src/light/spot_light.rs @@ -8,7 +8,7 @@ use super::*; /// shines light only in a given direction. The direction is taken from /// the transform, and can be specified with [`Transform::looking_at`](Transform::looking_at). #[derive(Component, Debug, Clone, Copy, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(Frustum, VisibleMeshEntities, Transform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct SpotLight { diff --git a/crates/bevy_pbr/src/light_probe/environment_map.rs b/crates/bevy_pbr/src/light_probe/environment_map.rs index 8069f2acac..52ccaef432 100644 --- a/crates/bevy_pbr/src/light_probe/environment_map.rs +++ b/crates/bevy_pbr/src/light_probe/environment_map.rs @@ -81,7 +81,7 @@ pub const ENVIRONMENT_MAP_SHADER_HANDLE: Handle = /// /// See [`crate::environment_map`] for detailed information. #[derive(Clone, Component, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct EnvironmentMapLight { /// The blurry image that represents diffuse radiance surrounding a region. 
pub diffuse_map: Handle, diff --git a/crates/bevy_pbr/src/light_probe/irradiance_volume.rs b/crates/bevy_pbr/src/light_probe/irradiance_volume.rs index 799329e982..05dd51c379 100644 --- a/crates/bevy_pbr/src/light_probe/irradiance_volume.rs +++ b/crates/bevy_pbr/src/light_probe/irradiance_volume.rs @@ -81,17 +81,17 @@ //! less ideal for this use case: //! //! 1. The level 1 spherical harmonic coefficients can be negative. That -//! prevents the use of the efficient [RGB9E5 texture format], which only -//! encodes unsigned floating point numbers, and forces the use of the -//! less-efficient [RGBA16F format] if hardware interpolation is desired. +//! prevents the use of the efficient [RGB9E5 texture format], which only +//! encodes unsigned floating point numbers, and forces the use of the +//! less-efficient [RGBA16F format] if hardware interpolation is desired. //! //! 2. As an alternative to RGBA16F, level 1 spherical harmonics can be -//! normalized and scaled to the SH0 base color, as [Frostbite] does. This -//! allows them to be packed in standard LDR RGBA8 textures. However, this -//! prevents the use of hardware trilinear filtering, as the nonuniform scale -//! factor means that hardware interpolation no longer produces correct results. -//! The 8 texture fetches needed to interpolate between voxels can be upwards of -//! twice as slow as the hardware interpolation. +//! normalized and scaled to the SH0 base color, as [Frostbite] does. This +//! allows them to be packed in standard LDR RGBA8 textures. However, this +//! prevents the use of hardware trilinear filtering, as the nonuniform scale +//! factor means that hardware interpolation no longer produces correct results. +//! The 8 texture fetches needed to interpolate between voxels can be upwards of +//! twice as slow as the hardware interpolation. //! //! The following chart summarizes the costs and benefits of ambient cubes, //! level 1 spherical harmonics, and level 2 spherical harmonics: @@ -168,7 +168,7 @@ pub(crate) const IRRADIANCE_VOLUMES_ARE_USABLE: bool = cfg!(not(target_arch = "w /// /// See [`crate::irradiance_volume`] for detailed information. #[derive(Clone, Reflect, Component, Debug)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct IrradianceVolume { /// The 3D texture that represents the ambient cubes, encoded in the format /// described in [`crate::irradiance_volume`]. @@ -251,7 +251,7 @@ impl<'a> RenderViewIrradianceVolumeBindGroupEntries<'a> { fallback_image, ) } else { - RenderViewIrradianceVolumeBindGroupEntries::get_single( + RenderViewIrradianceVolumeBindGroupEntries::single( render_view_irradiance_volumes, images, fallback_image, @@ -295,7 +295,7 @@ impl<'a> RenderViewIrradianceVolumeBindGroupEntries<'a> { /// Looks up and returns the bindings for any irradiance volumes visible in /// the view, as well as the sampler. This is the version used when binding /// arrays aren't available on the current platform. 
- fn get_single( + fn single( render_view_irradiance_volumes: Option<&RenderViewLightProbes>, images: &'a RenderAssets, fallback_image: &'a FallbackImage, diff --git a/crates/bevy_pbr/src/light_probe/mod.rs b/crates/bevy_pbr/src/light_probe/mod.rs index 3ef3e78db8..ebfc7c7e7c 100644 --- a/crates/bevy_pbr/src/light_probe/mod.rs +++ b/crates/bevy_pbr/src/light_probe/mod.rs @@ -5,17 +5,17 @@ use bevy_asset::{load_internal_asset, weak_handle, AssetId, Handle}; use bevy_core_pipeline::core_3d::Camera3d; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - component::{require, Component}, + component::Component, entity::Entity, query::With, reflect::ReflectComponent, resource::Resource, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Commands, Local, Query, Res, ResMut}, }; use bevy_image::Image; use bevy_math::{Affine3A, FloatOrd, Mat4, Vec3A, Vec4}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ extract_instances::ExtractInstancesPlugin, @@ -106,7 +106,7 @@ pub struct LightProbePlugin; /// specific technique but rather to a class of techniques. Developers familiar /// with other engines should be aware of this terminology difference. #[derive(Component, Debug, Clone, Copy, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(Transform, Visibility)] pub struct LightProbe; @@ -769,22 +769,22 @@ pub(crate) fn add_cubemap_texture_view<'a>( /// (a.k.a. bindless textures). This function checks for these pitfalls: /// /// 1. If GLSL support is enabled at the feature level, then in debug mode -/// `naga_oil` will attempt to compile all shader modules under GLSL to check -/// validity of names, even if GLSL isn't actually used. This will cause a crash -/// if binding arrays are enabled, because binding arrays are currently -/// unimplemented in the GLSL backend of Naga. Therefore, we disable binding -/// arrays if the `shader_format_glsl` feature is present. +/// `naga_oil` will attempt to compile all shader modules under GLSL to check +/// validity of names, even if GLSL isn't actually used. This will cause a crash +/// if binding arrays are enabled, because binding arrays are currently +/// unimplemented in the GLSL backend of Naga. Therefore, we disable binding +/// arrays if the `shader_format_glsl` feature is present. /// /// 2. If there aren't enough texture bindings available to accommodate all the -/// binding arrays, the driver will panic. So we also bail out if there aren't -/// enough texture bindings available in the fragment shader. +/// binding arrays, the driver will panic. So we also bail out if there aren't +/// enough texture bindings available in the fragment shader. /// /// 3. If binding arrays aren't supported on the hardware, then we obviously /// can't use them. Adreno <= 610 claims to support bindless, but seems to be /// too buggy to be usable. /// /// 4. If binding arrays are supported on the hardware, but they can only be -/// accessed by uniform indices, that's not good enough, and we bail out. +/// accessed by uniform indices, that's not good enough, and we bail out. /// /// If binding arrays aren't usable, we disable reflection probes and limit the /// number of irradiance volumes in the scene to 1. 
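// A minimal sketch of the kind of capability check the pitfalls above describe. This is
// not the engine's own implementation; the specific binding-count threshold and the exact
// wgpu feature set tested here are assumptions for illustration.
use bevy_render::{renderer::RenderDevice, settings::WgpuFeatures};

fn binding_arrays_look_usable(render_device: &RenderDevice) -> bool {
    // Pitfall 1: naga's GLSL backend cannot emit binding arrays, so bail out whenever the
    // GLSL shader path is compiled in.
    if cfg!(feature = "shader_format_glsl") {
        return false;
    }
    // Pitfall 2: make sure the fragment stage has enough texture bindings left over for
    // the binding arrays (16 is an arbitrary illustrative floor).
    if render_device.limits().max_sampled_textures_per_shader_stage < 16 {
        return false;
    }
    // Pitfalls 3 and 4: the hardware must expose binding arrays *and* allow non-uniform
    // indexing into them, otherwise they are not worth using.
    render_device.features().contains(
        WgpuFeatures::TEXTURE_BINDING_ARRAY
            | WgpuFeatures::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
    )
}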
diff --git a/crates/bevy_pbr/src/lightmap/mod.rs b/crates/bevy_pbr/src/lightmap/mod.rs index afaa71d792..4175d6ff61 100644 --- a/crates/bevy_pbr/src/lightmap/mod.rs +++ b/crates/bevy_pbr/src/lightmap/mod.rs @@ -41,13 +41,13 @@ use bevy_ecs::{ reflect::ReflectComponent, removal_detection::RemovedComponents, resource::Resource, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::Image; use bevy_math::{uvec2, vec4, Rect, UVec2}; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ render_asset::RenderAssets, @@ -88,7 +88,7 @@ pub struct LightmapPlugin; /// has a second UV layer ([`ATTRIBUTE_UV_1`](bevy_render::mesh::Mesh::ATTRIBUTE_UV_1)), /// then the lightmap will render using those UVs. #[derive(Component, Clone, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct Lightmap { /// The lightmap texture. pub image: Handle, @@ -116,9 +116,6 @@ pub struct Lightmap { /// There is one of these per visible lightmapped mesh instance. #[derive(Debug)] pub(crate) struct RenderLightmap { - /// The ID of the lightmap texture. - pub(crate) image: AssetId, - /// The rectangle within the lightmap texture that the UVs are relative to. /// /// The top left coordinate is the `min` part of the rect, and the bottom @@ -245,7 +242,6 @@ fn extract_lightmaps( render_lightmaps.render_lightmaps.insert( entity.into(), RenderLightmap::new( - lightmap.image.id(), lightmap.uv_rect, slab_index, slot_index, @@ -305,14 +301,12 @@ impl RenderLightmap { /// Creates a new lightmap from a texture, a UV rect, and a slab and slot /// index pair. fn new( - image: AssetId, uv_rect: Rect, slab_index: LightmapSlabIndex, slot_index: LightmapSlotIndex, bicubic_sampling: bool, ) -> Self { Self { - image, uv_rect, slab_index, slot_index, diff --git a/crates/bevy_pbr/src/material.rs b/crates/bevy_pbr/src/material.rs index 32cc445d42..1814996184 100644 --- a/crates/bevy_pbr/src/material.rs +++ b/crates/bevy_pbr/src/material.rs @@ -1,4 +1,6 @@ -use crate::material_bind_groups::{MaterialBindGroupAllocator, MaterialBindingId}; +use crate::material_bind_groups::{ + FallbackBindlessResources, MaterialBindGroupAllocator, MaterialBindingId, +}; #[cfg(feature = "meshlet")] use crate::meshlet::{ prepare_material_meshlet_meshes_main_opaque_pass, queue_material_meshlet_meshes, @@ -19,7 +21,6 @@ use bevy_core_pipeline::{ }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::component::Tick; -use bevy_ecs::entity::EntityHash; use bevy_ecs::system::SystemChangeTick; use bevy_ecs::{ prelude::*, @@ -28,10 +29,15 @@ use bevy_ecs::{ SystemParamItem, }, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::hash_map::Entry; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_platform::hash::FixedHasher; use bevy_reflect::std_traits::ReflectDefault; use bevy_reflect::Reflect; +use bevy_render::camera::extract_cameras; use bevy_render::mesh::mark_3d_meshes_as_changed_if_their_assets_changed; +use bevy_render::render_asset::prepare_assets; +use bevy_render::renderer::RenderQueue; use bevy_render::{ batching::gpu_preprocessing::GpuPreprocessingSupport, extract_resource::ExtractResource, @@ -41,11 +47,12 @@ use bevy_render::{ render_resource::*, renderer::RenderDevice, sync_world::MainEntity, - view::{ExtractedView, Msaa, RenderVisibilityRanges, ViewVisibility}, + 
view::{ExtractedView, Msaa, RenderVisibilityRanges, RetainedViewEntity, ViewVisibility}, Extract, }; use bevy_render::{mesh::allocator::MeshAllocator, sync_world::MainEntityHashMap}; use bevy_render::{texture::FallbackImage, view::RenderVisibleEntities}; +use bevy_utils::Parallel; use core::{hash::Hash, marker::PhantomData}; use tracing::error; @@ -252,6 +259,8 @@ pub struct MaterialPlugin { pub prepass_enabled: bool, /// Controls if shadows are enabled for the Material. pub shadows_enabled: bool, + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, pub _marker: PhantomData, } @@ -260,6 +269,7 @@ impl Default for MaterialPlugin { Self { prepass_enabled: true, shadows_enabled: true, + debug_flags: RenderDebugFlags::default(), _marker: Default::default(), } } @@ -296,7 +306,7 @@ where .init_resource::>() .init_resource::>() .init_resource::>() - .init_resource::>() + .init_resource::() .add_render_command::>() .add_render_command::>() .add_render_command::>() @@ -306,8 +316,11 @@ where .add_systems( ExtractSchedule, ( - extract_mesh_materials::.before(ExtractMeshesSet), - extract_entities_needs_specialization::, + extract_mesh_materials::.in_set(ExtractMaterialsSet), + early_sweep_material_instances:: + .after(ExtractMaterialsSet) + .before(late_sweep_material_instances), + extract_entities_needs_specialization::.after(extract_cameras), ), ) .add_systems( @@ -317,7 +330,8 @@ where .in_set(RenderSet::PrepareMeshes) .after(prepare_assets::>) .after(prepare_assets::) - .after(collect_meshes_for_gpu_building), + .after(collect_meshes_for_gpu_building) + .after(set_mesh_motion_vector_flags), queue_material_meshes:: .in_set(RenderSet::QueueMeshes) .after(prepare_assets::>), @@ -325,7 +339,11 @@ where ) .add_systems( Render, - prepare_material_bind_groups:: + ( + prepare_material_bind_groups::, + write_material_bind_group_buffers::, + ) + .chain() .in_set(RenderSet::PrepareBindGroups) .after(prepare_assets::>), ); @@ -339,9 +357,11 @@ where Render, ( check_views_lights_need_specialization.in_set(RenderSet::PrepareAssets), + // specialize_shadows:: also needs to run after prepare_assets::>, + // which is fine since ManageViews is after PrepareAssets specialize_shadows:: - .in_set(RenderSet::PrepareMeshes) - .after(prepare_assets::>), + .in_set(RenderSet::ManageViews) + .after(prepare_lights), queue_shadows:: .in_set(RenderSet::QueueMeshes) .after(prepare_assets::>), @@ -374,7 +394,7 @@ where } if self.prepass_enabled { - app.add_plugins(PrepassPlugin::::default()); + app.add_plugins(PrepassPlugin::::new(self.debug_flags)); } } @@ -387,6 +407,14 @@ where } } +/// A dummy [`AssetId`] that we use as a placeholder whenever a mesh doesn't +/// have a material. +/// +/// See the comments in [`RenderMaterialInstances::mesh_material`] for more +/// information. +pub(crate) static DUMMY_MESH_MATERIAL: AssetId = + AssetId::::invalid(); + /// A key uniquely identifying a specialized [`MaterialPipeline`]. pub struct MaterialPipelineKey { pub mesh_key: MeshPipelineKey, @@ -506,7 +534,7 @@ impl FromWorld for MaterialPipeline { ShaderRef::Handle(handle) => Some(handle), ShaderRef::Path(path) => Some(asset_server.load(path)), }, - bindless: material_bind_groups::material_uses_bindless_resources::(render_device), + bindless: material_uses_bindless_resources::(render_device), marker: PhantomData, } } @@ -525,7 +553,7 @@ pub struct SetMaterialBindGroup(PhantomData); impl RenderCommand
for SetMaterialBindGroup { type Param = ( SRes>>, - SRes>, + SRes, SRes>, ); type ViewQuery = (); @@ -547,17 +575,20 @@ impl RenderCommand
for SetMaterial let material_instances = material_instances.into_inner(); let material_bind_group_allocator = material_bind_group_allocator.into_inner(); - let Some(material_asset_id) = material_instances.get(&item.main_entity()) else { + let Some(material_instance) = material_instances.instances.get(&item.main_entity()) else { return RenderCommandResult::Skip; }; - let Some(material) = materials.get(*material_asset_id) else { + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { + return RenderCommandResult::Skip; + }; + let Some(material) = materials.get(material_asset_id) else { return RenderCommandResult::Skip; }; let Some(material_bind_group) = material_bind_group_allocator.get(material.binding.group) else { return RenderCommandResult::Skip; }; - let Some(bind_group) = material_bind_group.get_bind_group() else { + let Some(bind_group) = material_bind_group.bind_group() else { return RenderCommandResult::Skip; }; pass.set_bind_group(I, bind_group, &[]); @@ -565,16 +596,49 @@ impl RenderCommand
for SetMaterial } } -/// Stores all extracted instances of a [`Material`] in the render world. -#[derive(Resource, Deref, DerefMut)] -pub struct RenderMaterialInstances(pub MainEntityHashMap>); +/// Stores all extracted instances of all [`Material`]s in the render world. +#[derive(Resource, Default)] +pub struct RenderMaterialInstances { + /// Maps from each entity in the main world to the + /// [`RenderMaterialInstance`] associated with it. + pub instances: MainEntityHashMap, + /// A monotonically-increasing counter, which we use to sweep + /// [`RenderMaterialInstances::instances`] when the entities and/or required + /// components are removed. + current_change_tick: Tick, +} -impl Default for RenderMaterialInstances { - fn default() -> Self { - Self(Default::default()) +impl RenderMaterialInstances { + /// Returns the mesh material ID for the entity with the given mesh, or a + /// dummy mesh material ID if the mesh has no material ID. + /// + /// Meshes almost always have materials, but in very specific circumstances + /// involving custom pipelines they won't. (See the + /// `specialized_mesh_pipelines` example.) + pub(crate) fn mesh_material(&self, entity: MainEntity) -> UntypedAssetId { + match self.instances.get(&entity) { + Some(render_instance) => render_instance.asset_id, + None => DUMMY_MESH_MATERIAL.into(), + } } } +/// The material associated with a single mesh instance in the main world. +/// +/// Note that this uses an [`UntypedAssetId`] and isn't generic over the +/// material type, for simplicity. +pub struct RenderMaterialInstance { + /// The material asset. + pub(crate) asset_id: UntypedAssetId, + /// The [`RenderMaterialInstances::current_change_tick`] at which this + /// material instance was last modified. + last_change_tick: Tick, +} + +/// A [`SystemSet`] that contains all `extract_mesh_materials` systems. +#[derive(SystemSet, Clone, PartialEq, Eq, Debug, Hash)] +pub struct ExtractMaterialsSet; + pub const fn alpha_mode_pipeline_key(alpha_mode: AlphaMode, msaa: &Msaa) -> MeshPipelineKey { match alpha_mode { // Premultiplied and Add share the same pipeline key @@ -631,7 +695,7 @@ pub const fn screen_space_specular_transmission_pipeline_key( /// /// As [`crate::render::mesh::collect_meshes_for_gpu_building`] only considers /// meshes that were newly extracted, and it writes information from the -/// [`RenderMeshMaterialIds`] into the +/// [`RenderMaterialInstances`] into the /// [`crate::render::mesh::MeshInputUniform`], we must tell /// [`crate::render::mesh::extract_meshes_for_gpu_building`] to re-extract a /// mesh if its material changed. Otherwise, the material binding information in @@ -640,7 +704,10 @@ pub const fn screen_space_specular_transmission_pipeline_key( /// [`crate::render::mesh::extract_meshes_for_gpu_building`] re-extracts a mesh /// is to mark its [`Mesh3d`] as changed, so that's what this system does. fn mark_meshes_as_changed_if_their_materials_changed( - mut changed_meshes_query: Query<&mut Mesh3d, Changed>>, + mut changed_meshes_query: Query< + &mut Mesh3d, + Or<(Changed>, AssetChanged>)>, + >, ) where M: Material, { @@ -649,51 +716,138 @@ fn mark_meshes_as_changed_if_their_materials_changed( } } -/// Fills the [`RenderMaterialInstances`] and [`RenderMeshMaterialIds`] -/// resources from the meshes in the scene. +/// Fills the [`RenderMaterialInstances`] resources from the meshes in the +/// scene. 
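// Illustrative sketch only, not part of the diff: the bookkeeping described above
// for `RenderMaterialInstances` — a dummy ID for meshes without a material, and the
// change-tick guard that the sweep systems below rely on so a material removed and
// re-added in the same frame is not swept. `Entity`/`AssetId` are plain integers
// here instead of the real `MainEntity`/`UntypedAssetId` types.
use std::collections::HashMap;

type Entity = u64;
type AssetId = u64;
type Tick = u32;

const DUMMY_MESH_MATERIAL: AssetId = u64::MAX;

struct Instance {
    asset_id: AssetId,
    last_change_tick: Tick,
}

#[derive(Default)]
struct MaterialInstances {
    instances: HashMap<Entity, Instance>,
    current_change_tick: Tick,
}

impl MaterialInstances {
    /// Meshes almost always have a material; fall back to a dummy ID if not.
    fn mesh_material(&self, entity: Entity) -> AssetId {
        self.instances
            .get(&entity)
            .map_or(DUMMY_MESH_MATERIAL, |instance| instance.asset_id)
    }

    /// Record (or refresh) the material used by `entity` this frame.
    fn insert(&mut self, entity: Entity, asset_id: AssetId) {
        let last_change_tick = self.current_change_tick;
        self.instances
            .insert(entity, Instance { asset_id, last_change_tick });
    }

    /// Sweep an entity whose material component was removed, but only if the
    /// entry wasn't refreshed this frame (remove-then-re-add in one frame).
    fn sweep(&mut self, entity: Entity) {
        let stale = self
            .instances
            .get(&entity)
            .is_some_and(|instance| instance.last_change_tick != self.current_change_tick);
        if stale {
            self.instances.remove(&entity);
        }
    }

    /// The late sweep bumps the tick exactly once, at the end of extraction.
    fn end_frame(&mut self) {
        self.current_change_tick += 1;
    }
}

fn main() {
    let mut instances = MaterialInstances::default();
    instances.insert(7, 42);
    instances.sweep(7); // removed and re-added in the same frame: kept
    assert_eq!(instances.mesh_material(7), 42);
    instances.end_frame();
    instances.sweep(7); // removal observed with no re-add: swept
    assert_eq!(instances.mesh_material(7), DUMMY_MESH_MATERIAL);
}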
fn extract_mesh_materials( - mut material_instances: ResMut>, - mut material_ids: ResMut, + mut material_instances: ResMut, changed_meshes_query: Extract< Query< (Entity, &ViewVisibility, &MeshMaterial3d), Or<(Changed, Changed>)>, >, >, - mut removed_visibilities_query: Extract>, - mut removed_materials_query: Extract>>, ) { + let last_change_tick = material_instances.current_change_tick; + for (entity, view_visibility, material) in &changed_meshes_query { if view_visibility.get() { - material_instances.insert(entity.into(), material.id()); - material_ids.insert(entity.into(), material.id().into()); + material_instances.instances.insert( + entity.into(), + RenderMaterialInstance { + asset_id: material.id().untyped(), + last_change_tick, + }, + ); } else { - material_instances.remove(&MainEntity::from(entity)); - material_ids.remove(entity.into()); + material_instances + .instances + .remove(&MainEntity::from(entity)); + } + } +} + +/// Removes mesh materials from [`RenderMaterialInstances`] when their +/// [`MeshMaterial3d`] components are removed. +/// +/// This is tricky because we have to deal with the case in which a material of +/// type A was removed and replaced with a material of type B in the same frame +/// (which is actually somewhat common of an operation). In this case, even +/// though an entry will be present in `RemovedComponents>`, +/// we must not remove the entry in `RenderMaterialInstances` which corresponds +/// to material B. To handle this case, we use change ticks to avoid removing +/// the entry if it was updated this frame. +/// +/// This is the first of two sweep phases. Because this phase runs once per +/// material type, we need a second phase in order to guarantee that we only +/// bump [`RenderMaterialInstances::current_change_tick`] once. +fn early_sweep_material_instances( + mut material_instances: ResMut, + mut removed_materials_query: Extract>>, +) where + M: Material, +{ + let last_change_tick = material_instances.current_change_tick; + + for entity in removed_materials_query.read() { + if let Entry::Occupied(occupied_entry) = material_instances.instances.entry(entity.into()) { + // Only sweep the entry if it wasn't updated this frame. + if occupied_entry.get().last_change_tick != last_change_tick { + occupied_entry.remove(); + } + } + } +} + +/// Removes mesh materials from [`RenderMaterialInstances`] when their +/// [`ViewVisibility`] components are removed. +/// +/// This runs after all invocations of [`early_sweep_material_instances`] and is +/// responsible for bumping [`RenderMaterialInstances::current_change_tick`] in +/// preparation for a new frame. +pub(crate) fn late_sweep_material_instances( + mut material_instances: ResMut, + mut removed_visibilities_query: Extract>, +) { + let last_change_tick = material_instances.current_change_tick; + + for entity in removed_visibilities_query.read() { + if let Entry::Occupied(occupied_entry) = material_instances.instances.entry(entity.into()) { + // Only sweep the entry if it wasn't updated this frame. It's + // possible that a `ViewVisibility` component was removed and + // re-added in the same frame. + if occupied_entry.get().last_change_tick != last_change_tick { + occupied_entry.remove(); + } } } - for entity in removed_visibilities_query - .read() - .chain(removed_materials_query.read()) - { - // Only queue a mesh for removal if we didn't pick it up above. - // It's possible that a necessary component was removed and re-added in - // the same frame. 
- if !changed_meshes_query.contains(entity) { - material_instances.remove(&MainEntity::from(entity)); - material_ids.remove(entity.into()); - } - } + material_instances + .current_change_tick + .set(last_change_tick.get() + 1); } pub fn extract_entities_needs_specialization( entities_needing_specialization: Extract>>, mut entity_specialization_ticks: ResMut>, + mut removed_mesh_material_components: Extract>>, + mut specialized_material_pipeline_cache: ResMut>, + mut specialized_prepass_material_pipeline_cache: Option< + ResMut>, + >, + mut specialized_shadow_material_pipeline_cache: Option< + ResMut>, + >, + views: Query<&ExtractedView>, ticks: SystemChangeTick, ) where M: Material, { + // Clean up any despawned entities, we do this first in case the removed material was re-added + // the same frame, thus will appear both in the removed components list and have been added to + // the `EntitiesNeedingSpecialization` collection by triggering the `Changed` filter + for entity in removed_mesh_material_components.read() { + entity_specialization_ticks.remove(&MainEntity::from(entity)); + for view in views { + if let Some(cache) = + specialized_material_pipeline_cache.get_mut(&view.retained_view_entity) + { + cache.remove(&MainEntity::from(entity)); + } + if let Some(cache) = specialized_prepass_material_pipeline_cache + .as_mut() + .and_then(|c| c.get_mut(&view.retained_view_entity)) + { + cache.remove(&MainEntity::from(entity)); + } + if let Some(cache) = specialized_shadow_material_pipeline_cache + .as_mut() + .and_then(|c| c.get_mut(&view.retained_view_entity)) + { + cache.remove(&MainEntity::from(entity)); + } + } + } + for entity in entities_needing_specialization.iter() { // Update the entity's specialization tick with this run's tick entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); @@ -732,11 +886,22 @@ impl Default for EntitySpecializationTicks { } } +/// Stores the [`SpecializedMaterialViewPipelineCache`] for each view. #[derive(Resource, Deref, DerefMut)] pub struct SpecializedMaterialPipelineCache { - // (view_entity, material_entity) -> (tick, pipeline_id) + // view entity -> view pipeline cache #[deref] - map: HashMap<(MainEntity, MainEntity), (Tick, CachedRenderPipelineId), EntityHash>, + map: HashMap>, + marker: PhantomData, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. 
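// Illustrative sketch only, not part of the diff: the two-level cache and staleness
// test that `specialize_material_meshes` below performs, with integer stand-ins for
// `RetainedViewEntity`, `MainEntity`, `Tick`, and `CachedRenderPipelineId`. The real
// code compares ticks with the wrapping-aware `Tick::is_newer_than`; a plain `>` is
// used here for brevity.
use std::collections::HashMap;

type ViewId = u64;
type EntityId = u64;
type Tick = u32;
type PipelineId = u32;

// view -> (entity -> (last specialized tick, pipeline id))
type PipelineCache = HashMap<ViewId, HashMap<EntityId, (Tick, PipelineId)>>;

/// An entity needs (re)specialization if it has never been specialized for
/// this view, or if either the view or the entity changed afterwards.
fn needs_specialization(
    cache: &PipelineCache,
    view: ViewId,
    entity: EntityId,
    view_tick: Tick,
    entity_tick: Tick,
) -> bool {
    match cache.get(&view).and_then(|per_view| per_view.get(&entity)) {
        None => true,
        Some(&(last_specialized_tick, _)) => {
            view_tick > last_specialized_tick || entity_tick > last_specialized_tick
        }
    }
}

fn main() {
    let mut cache = PipelineCache::new();
    cache.entry(1).or_default().insert(10, (5, 99));
    assert!(!needs_specialization(&cache, 1, 10, 4, 5)); // nothing newer than tick 5
    assert!(needs_specialization(&cache, 1, 10, 6, 5)); // view changed since tick 5
    assert!(needs_specialization(&cache, 2, 10, 1, 1)); // never specialized for view 2
}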
+#[derive(Deref, DerefMut)] +pub struct SpecializedMaterialViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, marker: PhantomData, } @@ -749,31 +914,47 @@ impl Default for SpecializedMaterialPipelineCache { } } +impl Default for SpecializedMaterialViewPipelineCache { + fn default() -> Self { + Self { + map: MainEntityHashMap::default(), + marker: PhantomData, + } + } +} + pub fn check_entities_needing_specialization( needs_specialization: Query< Entity, - Or<( - Changed, - AssetChanged, - Changed>, - AssetChanged>, - )>, + ( + Or<( + Changed, + AssetChanged, + Changed>, + AssetChanged>, + )>, + With>, + ), >, + mut par_local: Local>>, mut entities_needing_specialization: ResMut>, ) where M: Material, { entities_needing_specialization.clear(); - for entity in &needs_specialization { - entities_needing_specialization.push(entity); - } + + needs_specialization + .par_iter() + .for_each(|entity| par_local.borrow_local_mut().push(entity)); + + par_local.drain_into(&mut entities_needing_specialization); } pub fn specialize_material_meshes( render_meshes: Res>, render_materials: Res>>, render_mesh_instances: Res, - render_material_instances: Res>, + render_material_instances: Res, render_lightmaps: Res, render_visibility_ranges: Res, ( @@ -789,7 +970,7 @@ pub fn specialize_material_meshes( Res>, Res>, ), - views: Query<(&MainEntity, &ExtractedView, &RenderVisibleEntities)>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, view_key_cache: Res, entity_specialization_ticks: Res>, view_specialization_ticks: Res, @@ -801,7 +982,13 @@ pub fn specialize_material_meshes( ) where M::Data: PartialEq + Eq + Hash + Clone, { - for (view_entity, view, visible_entities) in &views { + // Record the retained IDs of all shadow views so that we can expire old + // pipeline IDs. 
+ let mut all_views: HashSet = HashSet::default(); + + for (view, visible_entities) in &views { + all_views.insert(view.retained_view_entity); + if !transparent_render_phases.contains_key(&view.retained_view_entity) && !opaque_render_phases.contains_key(&view.retained_view_entity) && !alpha_mask_render_phases.contains_key(&view.retained_view_entity) @@ -810,15 +997,32 @@ pub fn specialize_material_meshes( continue; } - let Some(view_key) = view_key_cache.get(view_entity) else { + let Some(view_key) = view_key_cache.get(&view.retained_view_entity) else { continue; }; + let view_tick = view_specialization_ticks + .get(&view.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(view.retained_view_entity) + .or_default(); + for (_, visible_entity) in visible_entities.iter::() { - let view_tick = view_specialization_ticks.get(view_entity).unwrap(); + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); - let last_specialized_tick = specialized_material_pipeline_cache - .get(&(*view_entity, *visible_entity)) + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) .map(|(tick, _)| *tick); let needs_specialization = last_specialized_tick.is_none_or(|tick| { view_tick.is_newer_than(tick, ticks.this_run()) @@ -827,17 +1031,10 @@ pub fn specialize_material_meshes( if !needs_specialization { continue; } - let Some(material_asset_id) = render_material_instances.get(visible_entity) else { - continue; - }; - let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) - else { - continue; - }; let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { continue; }; - let Some(material) = render_materials.get(*material_asset_id) else { + let Some(material) = render_materials.get(material_asset_id) else { continue; }; let Some(material_bind_group) = @@ -898,12 +1095,14 @@ pub fn specialize_material_meshes( } }; - specialized_material_pipeline_cache.insert( - (*view_entity, *visible_entity), - (ticks.this_run(), pipeline_id), - ); + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); } } + + // Delete specialized pipelines belonging to views that have expired. 
+ specialized_material_pipeline_cache + .retain(|retained_view_entity, _| all_views.contains(retained_view_entity)); } /// For each view, iterates over all the meshes visible from that view and adds @@ -911,19 +1110,19 @@ pub fn specialize_material_meshes( pub fn queue_material_meshes( render_materials: Res>>, render_mesh_instances: Res, - render_material_instances: Res>, + render_material_instances: Res, mesh_allocator: Res, gpu_preprocessing_support: Res, mut opaque_render_phases: ResMut>, mut alpha_mask_render_phases: ResMut>, mut transmissive_render_phases: ResMut>, mut transparent_render_phases: ResMut>, - views: Query<(&MainEntity, &ExtractedView, &RenderVisibleEntities)>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, specialized_material_pipeline_cache: ResMut>, ) where M::Data: PartialEq + Eq + Hash + Clone, { - for (view_entity, view, visible_entities) in &views { + for (view, visible_entities) in &views { let ( Some(opaque_phase), Some(alpha_mask_phase), @@ -939,10 +1138,16 @@ pub fn queue_material_meshes( continue; }; + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(&view.retained_view_entity) + else { + continue; + }; + let rangefinder = view.rangefinder3d(); for (render_entity, visible_entity) in visible_entities.iter::() { - let Some((current_change_tick, pipeline_id)) = specialized_material_pipeline_cache - .get(&(*view_entity, *visible_entity)) + let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) else { continue; @@ -955,14 +1160,18 @@ pub fn queue_material_meshes( continue; } - let Some(material_asset_id) = render_material_instances.get(visible_entity) else { + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { continue; }; let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) else { continue; }; - let Some(material) = render_materials.get(*material_asset_id) else { + let Some(material) = render_materials.get(material_asset_id) else { continue; }; @@ -985,6 +1194,11 @@ pub fn queue_material_meshes( } RenderPhaseType::Opaque => { if material.properties.render_method == OpaqueRendererMethod::Deferred { + // Even though we aren't going to insert the entity into + // a bin, we still want to update its cache entry. That + // way, we know we don't need to re-examine it in future + // frames. + opaque_phase.update_cache(*visible_entity, None, current_change_tick); continue; } let batch_set_key = Opaque3dBatchSetKey { @@ -1002,6 +1216,7 @@ pub fn queue_material_meshes( batch_set_key, bin_key, (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, BinnedRenderPhaseType::mesh( mesh_instance.should_batch(), &gpu_preprocessing_support, @@ -1025,6 +1240,7 @@ pub fn queue_material_meshes( batch_set_key, bin_key, (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, BinnedRenderPhaseType::mesh( mesh_instance.should_batch(), &gpu_preprocessing_support, @@ -1047,16 +1263,12 @@ pub fn queue_material_meshes( } } } - - // Remove invalid entities from the bins. - opaque_phase.sweep_old_entities(); - alpha_mask_phase.sweep_old_entities(); } } /// Default render method used for opaque materials. 
#[derive(Default, Resource, Clone, Debug, ExtractResource, Reflect)] -#[reflect(Resource, Default, Debug)] +#[reflect(Resource, Default, Debug, Clone)] pub struct DefaultOpaqueRendererMethod(OpaqueRendererMethod); impl DefaultOpaqueRendererMethod { @@ -1096,6 +1308,7 @@ impl DefaultOpaqueRendererMethod { /// /// If a material indicates `OpaqueRendererMethod::Auto`, `DefaultOpaqueRendererMethod` will be used. #[derive(Default, Clone, Copy, Debug, PartialEq, Reflect)] +#[reflect(Default, Clone, PartialEq)] pub enum OpaqueRendererMethod { #[default] Forward, @@ -1180,8 +1393,8 @@ impl RenderAsset for PreparedMaterial { render_device, pipeline, default_opaque_render_method, - ref mut bind_group_allocator, - ref mut render_material_bindings, + bind_group_allocator, + render_material_bindings, opaque_draw_functions, alpha_mask_draw_functions, transmissive_draw_functions, @@ -1190,14 +1403,9 @@ impl RenderAsset for PreparedMaterial { alpha_mask_prepass_draw_functions, opaque_deferred_draw_functions, alpha_mask_deferred_draw_functions, - ref mut material_param, + material_param, ): &mut SystemParamItem, ) -> Result> { - // Allocate a material binding ID if needed. - let material_binding_id = *render_material_bindings - .entry(material_id.into()) - .or_insert_with(|| bind_group_allocator.allocate()); - let draw_opaque_pbr = opaque_draw_functions.read().id::>(); let draw_alpha_mask_pbr = alpha_mask_draw_functions.read().id::>(); let draw_transmissive_pbr = transmissive_draw_functions.read().id::>(); @@ -1263,10 +1471,27 @@ impl RenderAsset for PreparedMaterial { false, ) { Ok(unprepared) => { - bind_group_allocator.init(render_device, material_binding_id, unprepared); + // Allocate or update the material. + let binding = match render_material_bindings.entry(material_id.into()) { + Entry::Occupied(mut occupied_entry) => { + // TODO: Have a fast path that doesn't require + // recreating the bind group if only buffer contents + // change. For now, we just delete and recreate the bind + // group. + bind_group_allocator.free(*occupied_entry.get()); + let new_binding = bind_group_allocator + .allocate_unprepared(unprepared, &pipeline.material_layout); + *occupied_entry.get_mut() = new_binding; + new_binding + } + Entry::Vacant(vacant_entry) => *vacant_entry.insert( + bind_group_allocator + .allocate_unprepared(unprepared, &pipeline.material_layout), + ), + }; Ok(PreparedMaterial { - binding: material_binding_id, + binding, properties: MaterialProperties { alpha_mode: material.alpha_mode(), depth_bias: material.depth_bias(), @@ -1298,11 +1523,9 @@ impl RenderAsset for PreparedMaterial { ) { Ok(prepared_bind_group) => { // Store the resulting bind group directly in the slot. 
- bind_group_allocator.init_custom( - material_binding_id, - prepared_bind_group.bind_group, - prepared_bind_group.data, - ); + let material_binding_id = + bind_group_allocator.allocate_prepared(prepared_bind_group); + render_material_bindings.insert(material_id.into(), material_binding_id); Ok(PreparedMaterial { binding: material_binding_id, @@ -1335,14 +1558,9 @@ impl RenderAsset for PreparedMaterial { fn unload_asset( source_asset: AssetId, - ( - _, - _, - _, - ref mut bind_group_allocator, - ref mut render_material_bindings, - .., - ): &mut SystemParamItem, + (_, _, _, bind_group_allocator, render_material_bindings, ..): &mut SystemParamItem< + Self::Param, + >, ) { let Some(material_binding_id) = render_material_bindings.remove(&source_asset.untyped()) else { @@ -1367,8 +1585,8 @@ impl From for MaterialBindGroupId { } } -/// A system that creates and/or recreates any bind groups that contain -/// materials that were modified this frame. +/// Creates and/or recreates any bind groups that contain materials that were +/// modified this frame. pub fn prepare_material_bind_groups( mut allocator: ResMut>, render_device: Res, @@ -1377,5 +1595,20 @@ pub fn prepare_material_bind_groups( ) where M: Material, { - allocator.prepare_bind_groups(&render_device, &fallback_image, &fallback_resources); + allocator.prepare_bind_groups(&render_device, &fallback_resources, &fallback_image); +} + +/// Uploads the contents of all buffers that the [`MaterialBindGroupAllocator`] +/// manages to the GPU. +/// +/// Non-bindless allocators don't currently manage any buffers, so this method +/// only has an effect for bindless allocators. +pub fn write_material_bind_group_buffers( + mut allocator: ResMut>, + render_device: Res, + render_queue: Res, +) where + M: Material, +{ + allocator.write_buffers(&render_device, &render_queue); } diff --git a/crates/bevy_pbr/src/material_bind_groups.rs b/crates/bevy_pbr/src/material_bind_groups.rs index 9066537a72..b539d2098f 100644 --- a/crates/bevy_pbr/src/material_bind_groups.rs +++ b/crates/bevy_pbr/src/material_bind_groups.rs @@ -1,137 +1,288 @@ //! Material bind group management for bindless resources. //! -//! In bindless mode, Bevy's renderer groups materials into small bind groups. -//! This allocator manages each bind group, assigning slots to materials as +//! In bindless mode, Bevy's renderer groups materials into bind groups. This +//! allocator manages each bind group, assigning slots to materials as //! appropriate. 
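// Illustrative sketch only, not part of the diff: the free-list slot assignment that
// the slab types below describe (a `free_slots` list that is reused before the slab
// grows). Simplified: the real slabs also track per-resource reference counts and a
// configurable slab capacity.
struct SlotAllocator {
    free_slots: Vec<u32>, // slots released by earlier deallocations, reused first
    len: u32,             // total number of slots ever created
}

impl SlotAllocator {
    fn new() -> Self {
        Self { free_slots: Vec::new(), len: 0 }
    }

    /// Reuse a freed slot if one is available, otherwise append a new one.
    fn allocate(&mut self) -> u32 {
        self.free_slots.pop().unwrap_or_else(|| {
            let slot = self.len;
            self.len += 1;
            slot
        })
    }

    /// Return a slot to the free list so a later allocation can reuse it.
    fn free(&mut self, slot: u32) {
        self.free_slots.push(slot);
    }
}

fn main() {
    let mut slots = SlotAllocator::new();
    let a = slots.allocate(); // 0
    let b = slots.allocate(); // 1
    slots.free(a);
    assert_eq!(slots.allocate(), 0); // the freed slot is reused
    assert_eq!(b, 1);
}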
-use crate::Material; +use core::{cmp::Ordering, iter, marker::PhantomData, mem, ops::Range}; + use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ resource::Resource, world::{FromWorld, World}, }; -use bevy_platform_support::collections::HashMap; -use bevy_reflect::{std_traits::ReflectDefault, Reflect}; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_reflect::{prelude::ReflectDefault, Reflect}; use bevy_render::{ render_resource::{ - BindGroup, BindGroupEntry, BindGroupLayout, BindGroupLayoutEntry, BindingResource, - BindingType, Buffer, BufferBinding, BufferInitDescriptor, BufferUsages, - OwnedBindingResource, Sampler, SamplerDescriptor, TextureViewDimension, - UnpreparedBindGroup, WgpuSampler, WgpuTextureView, + BindGroup, BindGroupEntry, BindGroupLayout, BindingNumber, BindingResource, + BindingResources, BindlessDescriptor, BindlessIndex, BindlessIndexTableDescriptor, + BindlessResourceType, Buffer, BufferBinding, BufferDescriptor, BufferId, + BufferInitDescriptor, BufferUsages, CompareFunction, FilterMode, OwnedBindingResource, + PreparedBindGroup, RawBufferVec, Sampler, SamplerDescriptor, SamplerId, TextureView, + TextureViewDimension, TextureViewId, UnpreparedBindGroup, WgpuSampler, WgpuTextureView, }, - renderer::RenderDevice, + renderer::{RenderDevice, RenderQueue}, + settings::WgpuFeatures, texture::FallbackImage, }; use bevy_utils::default; -use core::{any, iter, marker::PhantomData, num::NonZero}; -use tracing::error; +use bytemuck::Pod; +use tracing::{error, trace}; -/// An object that creates and stores bind groups for a single material type. +use crate::Material; + +/// A resource that places materials into bind groups and tracks their +/// resources. /// -/// This object collects bindless materials into groups as appropriate and -/// assigns slots as materials are created. +/// Internally, Bevy has separate allocators for bindless and non-bindless +/// materials. This resource provides a common interface to the specific +/// allocator in use. #[derive(Resource)] -pub struct MaterialBindGroupAllocator +pub enum MaterialBindGroupAllocator where M: Material, { - /// The data that the allocator keeps about each bind group. - bind_groups: Vec>, + /// The allocator used when the material is bindless. + Bindless(Box>), + /// The allocator used when the material is non-bindless. + NonBindless(Box>), +} - /// Stores IDs of material bind groups that have at least one slot - /// available. - free_bind_groups: Vec, - - /// The layout for this bind group. +/// The allocator that places bindless materials into bind groups and tracks +/// their resources. +pub struct MaterialBindGroupBindlessAllocator +where + M: Material, +{ + /// The slabs, each of which contains a bind group. + slabs: Vec>, + /// The layout of the bind groups that we produce. bind_group_layout: BindGroupLayout, - - /// Dummy buffers that are assigned to unused slots. - fallback_buffers: MaterialFallbackBuffers, - - /// Whether this material is actually using bindless resources. + /// Information about the bindless resources in the material. /// - /// This takes the availability of bindless resources on this platform into - /// account. - bindless_enabled: bool, + /// We use this information to create and maintain bind groups. + bindless_descriptor: BindlessDescriptor, + /// Dummy buffers that we use to fill empty slots in buffer binding arrays. + /// + /// There's one fallback buffer for each buffer in the bind group, each + /// appropriately sized. 
Each buffer contains one uninitialized element of + /// the applicable type. + fallback_buffers: HashMap, + + /// The maximum number of resources that can be stored in a slab. + /// + /// This corresponds to `SLAB_CAPACITY` in the `#[bindless(SLAB_CAPACITY)]` + /// attribute, when deriving `AsBindGroup`. + slab_capacity: u32, +} + +/// A single bind group and the bookkeeping necessary to allocate into it. +pub struct MaterialBindlessSlab +where + M: Material, +{ + /// The current bind group, if it's up to date. + /// + /// If this is `None`, then the bind group is dirty and needs to be + /// regenerated. + bind_group: Option, + + /// The GPU-accessible buffers that hold the mapping from binding index to + /// bindless slot. + /// + /// This is conventionally assigned to bind group binding 0, but it can be + /// changed using the `#[bindless(index_table(binding(B)))]` attribute on + /// `AsBindGroup`. + /// + /// Because the slab binary searches this table, the entries within must be + /// sorted by bindless index. + bindless_index_tables: Vec>, + + /// The binding arrays containing samplers. + samplers: HashMap>, + /// The binding arrays containing textures. + textures: HashMap>, + /// The binding arrays containing buffers. + buffers: HashMap>, + /// The buffers that contain plain old data (i.e. the structure-level + /// `#[data]` attribute of `AsBindGroup`). + data_buffers: HashMap, + + /// Holds extra CPU-accessible data that the material provides. + /// + /// Typically, this data is used for constructing the material key, for + /// pipeline specialization purposes. + extra_data: Vec>, + + /// A list of free slot IDs. + free_slots: Vec, + /// The total number of materials currently allocated in this slab. + live_allocation_count: u32, + /// The total number of resources currently allocated in the binding arrays. + allocated_resource_count: u32, +} + +/// A GPU-accessible buffer that holds the mapping from binding index to +/// bindless slot. +/// +/// This is conventionally assigned to bind group binding 0, but it can be +/// changed by altering the [`Self::binding_number`], which corresponds to the +/// `#[bindless(index_table(binding(B)))]` attribute in `AsBindGroup`. +struct MaterialBindlessIndexTable +where + M: Material, +{ + /// The buffer containing the mappings. + buffer: RetainedRawBufferVec, + /// The range of bindless indices that this bindless index table covers. + /// + /// If this range is M..N, then the field at index $i$ maps to bindless + /// index $i$ + M. The size of this table is N - M. + /// + /// This corresponds to the `#[bindless(index_table(range(M..N)))]` + /// attribute in `AsBindGroup`. + index_range: Range, + /// The binding number that this index table is assigned to in the shader. + binding_number: BindingNumber, phantom: PhantomData, } -/// Information that the allocator keeps about each bind group. -pub enum MaterialBindGroup +/// A single binding array for storing bindless resources and the bookkeeping +/// necessary to allocate into it. +struct MaterialBindlessBindingArray where - M: Material, + R: GetBindingResourceId, { - /// Information that the allocator keeps about each bind group with bindless - /// textures in use. - Bindless(MaterialBindlessBindGroup), - - /// Information that the allocator keeps about each bind group for which - /// bindless textures are not in use. - NonBindless(MaterialNonBindlessBindGroup), -} - -/// Information that the allocator keeps about each bind group with bindless -/// textures in use. 
-pub struct MaterialBindlessBindGroup -where - M: Material, -{ - /// The actual bind group. - pub bind_group: Option, - - /// The bind group data for each slot. + /// The number of the binding that we attach this binding array to. + binding_number: BindingNumber, + /// A mapping from bindless slot index to the resource stored in that slot, + /// if any. + bindings: Vec>>, + /// The type of resource stored in this binding array. + resource_type: BindlessResourceType, + /// Maps a resource ID to the slot in which it's stored. /// - /// This is `None` if the slot is unallocated and `Some` if the slot is - /// full. - unprepared_bind_groups: Vec>>, + /// This is essentially the inverse mapping of [`Self::bindings`]. + resource_to_slot: HashMap, + /// A list of free slots in [`Self::bindings`] that contain no binding. + free_slots: Vec, + /// The number of allocated objects in this binding array. + len: u32, +} - /// A bitfield that contains a 0 if the slot is free or a 1 if the slot is - /// full. +/// A single resource (sampler, texture, or buffer) in a binding array. +/// +/// Resources hold a reference count, which specifies the number of materials +/// currently allocated within the slab that refer to this resource. When the +/// reference count drops to zero, the resource is freed. +struct MaterialBindlessBinding +where + R: GetBindingResourceId, +{ + /// The sampler, texture, or buffer. + resource: R, + /// The number of materials currently allocated within the containing slab + /// that use this resource. + ref_count: u32, +} + +/// The allocator that stores bind groups for non-bindless materials. +pub struct MaterialBindGroupNonBindlessAllocator +where + M: Material, +{ + /// A mapping from [`MaterialBindGroupIndex`] to the bind group allocated in + /// each slot. + bind_groups: Vec>>, + /// The bind groups that are dirty and need to be prepared. /// - /// We keep this value so that we can quickly find the next free slot when - /// we go to allocate. - used_slot_bitmap: u32, + /// To prepare the bind groups, call + /// [`MaterialBindGroupAllocator::prepare_bind_groups`]. + to_prepare: HashSet, + /// A list of free bind group indices. + free_indices: Vec, + phantom: PhantomData, } -/// Information that the allocator keeps about each bind group for which -/// bindless textures are not in use. -/// -/// When a bindless texture isn't in use, bind groups and material instances are -/// in 1:1 correspondence, and therefore there's only a single slot for extra -/// material data here. -pub struct MaterialNonBindlessBindGroup +/// A single bind group that a [`MaterialBindGroupNonBindlessAllocator`] is +/// currently managing. +enum MaterialNonBindlessAllocatedBindGroup where M: Material, { - /// The single allocation in a non-bindless bind group. - allocation: MaterialNonBindlessBindGroupAllocation, + /// An unprepared bind group. + /// + /// The allocator prepares all outstanding unprepared bind groups when + /// [`MaterialBindGroupNonBindlessAllocator::prepare_bind_groups`] is + /// called. + Unprepared { + /// The unprepared bind group, including extra data. + bind_group: UnpreparedBindGroup, + /// The layout of that bind group. + layout: BindGroupLayout, + }, + /// A bind group that's already been prepared. + Prepared { + bind_group: PreparedBindGroup, + #[expect(dead_code, reason = "These buffers are only referenced by bind groups")] + uniform_buffers: Vec, + }, } -/// The single allocation in a non-bindless bind group. 
-enum MaterialNonBindlessBindGroupAllocation -where - M: Material, -{ - /// The allocation is free. - Unallocated, - /// The allocation has been allocated, but not yet initialized. - Allocated, - /// The allocation is full and contains both a bind group and extra data. - Initialized(BindGroup, M::Data), +/// Dummy instances of various resources that we fill unused slots in binding +/// arrays with. +#[derive(Resource)] +pub struct FallbackBindlessResources { + /// A dummy filtering sampler. + filtering_sampler: Sampler, + /// A dummy non-filtering sampler. + non_filtering_sampler: Sampler, + /// A dummy comparison sampler. + comparison_sampler: Sampler, } -/// Where the GPU data for a material is located. +/// The `wgpu` ID of a single bindless or non-bindless resource. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +enum BindingResourceId { + /// A buffer. + Buffer(BufferId), + /// A texture view, with the given dimension. + TextureView(TextureViewDimension, TextureViewId), + /// A sampler. + Sampler(SamplerId), + /// A buffer containing plain old data. + /// + /// This corresponds to the `#[data]` structure-level attribute on + /// `AsBindGroup`. + DataBuffer, +} + +/// A temporary list of references to `wgpu` bindless resources. /// -/// In bindless mode, materials are gathered into bind groups, and the slot is -/// necessary to locate the material data within that group. If not in bindless -/// mode, bind groups and materials are in 1:1 correspondence, and the slot -/// index is always 0. +/// We need this because the `wgpu` bindless API takes a slice of references. +/// Thus we need to create intermediate vectors of bindless resources in order +/// to satisfy `wgpu`'s lifetime requirements. +enum BindingResourceArray<'a> { + /// A list of bindings. + Buffers(Vec>), + /// A list of texture views. + TextureViews(Vec<&'a WgpuTextureView>), + /// A list of samplers. + Samplers(Vec<&'a WgpuSampler>), +} + +/// The location of a material (either bindless or non-bindless) within the +/// slabs. #[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Clone, Default)] pub struct MaterialBindingId { /// The index of the bind group (slab) where the GPU data is located. pub group: MaterialBindGroupIndex, /// The slot within that bind group. + /// + /// Non-bindless materials will always have a slot of 0. pub slot: MaterialBindGroupSlot, } @@ -139,8 +290,8 @@ pub struct MaterialBindingId { /// /// In bindless mode, each bind group contains multiple materials. In /// non-bindless mode, each bind group contains only one material. -#[derive(Clone, Copy, Debug, Default, Reflect, PartialEq, Deref, DerefMut)] -#[reflect(Default)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Reflect, Deref, DerefMut)] +#[reflect(Default, Clone, PartialEq, Hash)] pub struct MaterialBindGroupIndex(pub u32); impl From for MaterialBindGroupIndex { @@ -155,611 +306,574 @@ impl From for MaterialBindGroupIndex { /// In bindless mode, this slot is needed to locate the material data in each /// bind group, since multiple materials are packed into a single slab. In /// non-bindless mode, this slot is always 0. -#[derive(Clone, Copy, Debug, Default, Reflect, Deref, DerefMut)] -#[reflect(Default)] -pub struct MaterialBindGroupSlot(pub u16); +#[derive(Clone, Copy, Debug, Default, PartialEq, Reflect, Deref, DerefMut)] +#[reflect(Default, Clone, PartialEq)] +pub struct MaterialBindGroupSlot(pub u32); + +/// The CPU/GPU synchronization state of a buffer that we maintain. 
+/// +/// Currently, the only buffer that we maintain is the +/// [`MaterialBindlessIndexTable`]. +enum BufferDirtyState { + /// The buffer is currently synchronized between the CPU and GPU. + Clean, + /// The buffer hasn't been created yet. + NeedsReserve, + /// The buffer exists on both CPU and GPU, but the GPU data is out of date. + NeedsUpload, +} + +/// Information that describes a potential allocation of an +/// [`UnpreparedBindGroup`] into a slab. +struct BindlessAllocationCandidate { + /// A map that, for every resource in the [`UnpreparedBindGroup`] that + /// already existed in this slab, maps bindless index of that resource to + /// its slot in the appropriate binding array. + pre_existing_resources: HashMap, + /// Stores the number of free slots that are needed to satisfy this + /// allocation. + needed_free_slots: u32, +} + +/// A trait that allows fetching the [`BindingResourceId`] from a +/// [`BindlessResourceType`]. +/// +/// This is used when freeing bindless resources, in order to locate the IDs +/// assigned to each resource so that they can be removed from the appropriate +/// maps. +trait GetBindingResourceId { + /// Returns the [`BindingResourceId`] for this resource. + /// + /// `resource_type` specifies this resource's type. This is used for + /// textures, as a `wgpu` [`TextureView`] doesn't store enough information + /// itself to determine its dimension. + fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId; +} + +/// The public interface to a slab, which represents a single bind group. +pub struct MaterialSlab<'a, M>(MaterialSlabImpl<'a, M>) +where + M: Material; + +/// The actual implementation of a material slab. +/// +/// This has bindless and non-bindless variants. +enum MaterialSlabImpl<'a, M> +where + M: Material, +{ + /// The implementation of the slab interface we use when the slab + /// is bindless. + Bindless(&'a MaterialBindlessSlab), + /// The implementation of the slab interface we use when the slab + /// is non-bindless. + NonBindless(MaterialNonBindlessSlab<'a, M>), +} + +/// A single bind group that the [`MaterialBindGroupNonBindlessAllocator`] +/// manages. +enum MaterialNonBindlessSlab<'a, M> +where + M: Material, +{ + /// A slab that has a bind group. + Prepared(&'a PreparedBindGroup), + /// A slab that doesn't yet have a bind group. + Unprepared(&'a UnpreparedBindGroup), +} + +/// Manages an array of untyped plain old data on GPU and allocates individual +/// slots within that array. +/// +/// This supports the `#[data]` attribute of `AsBindGroup`. +struct MaterialDataBuffer { + /// The number of the binding that we attach this storage buffer to. + binding_number: BindingNumber, + /// The actual data. + /// + /// Note that this is untyped (`u8`); the actual aligned size of each + /// element is given by [`Self::aligned_element_size`]; + buffer: RetainedRawBufferVec, + /// The size of each element in the buffer, including padding and alignment + /// if any. + aligned_element_size: u32, + /// A list of free slots within the buffer. + free_slots: Vec, + /// The actual number of slots that have been allocated. + len: u32, +} + +/// A buffer containing plain old data, already packed into the appropriate GPU +/// format, and that can be updated incrementally. +/// +/// This structure exists in order to encapsulate the lazy update +/// ([`BufferDirtyState`]) logic in a single place. +#[derive(Deref, DerefMut)] +struct RetainedRawBufferVec +where + T: Pod, +{ + /// The contents of the buffer. 
+ #[deref] + buffer: RawBufferVec, + /// Whether the contents of the buffer have been uploaded to the GPU. + dirty: BufferDirtyState, +} + +/// The size of the buffer that we assign to unused buffer slots, in bytes. +/// +/// This is essentially arbitrary, as it doesn't seem to matter to `wgpu` what +/// the size is. +const DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE: u64 = 16; impl From for MaterialBindGroupSlot { fn from(value: u32) -> Self { - MaterialBindGroupSlot(value as u16) + MaterialBindGroupSlot(value) } } impl From for u32 { fn from(value: MaterialBindGroupSlot) -> Self { - value.0 as u32 + value.0 } } -/// A temporary data structure that contains references to bindless resources. -/// -/// We need this because the `wgpu` bindless API takes a slice of references. -/// Thus we need to create intermediate vectors of bindless resources in order -/// to satisfy the lifetime requirements. -enum BindingResourceArray<'a> { - Buffers(Vec>), - TextureViews(TextureViewDimension, Vec<&'a WgpuTextureView>), - Samplers(Vec<&'a WgpuSampler>), +impl<'a> From<&'a OwnedBindingResource> for BindingResourceId { + fn from(value: &'a OwnedBindingResource) -> Self { + match *value { + OwnedBindingResource::Buffer(ref buffer) => BindingResourceId::Buffer(buffer.id()), + OwnedBindingResource::Data(_) => BindingResourceId::DataBuffer, + OwnedBindingResource::TextureView(ref texture_view_dimension, ref texture_view) => { + BindingResourceId::TextureView(*texture_view_dimension, texture_view.id()) + } + OwnedBindingResource::Sampler(_, ref sampler) => { + BindingResourceId::Sampler(sampler.id()) + } + } + } } -/// Contains dummy resources that we use to pad out bindless arrays. -/// -/// On DX12, every binding array slot must be filled, so we have to fill unused -/// slots. -#[derive(Resource)] -pub struct FallbackBindlessResources { - /// A dummy sampler that we fill unused slots in bindless sampler arrays - /// with. - fallback_sampler: Sampler, +impl GetBindingResourceId for Buffer { + fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId { + BindingResourceId::Buffer(self.id()) + } } -struct MaterialFallbackBuffers(HashMap); +impl GetBindingResourceId for Sampler { + fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId { + BindingResourceId::Sampler(self.id()) + } +} -/// The minimum byte size of each fallback buffer. -const MIN_BUFFER_SIZE: u64 = 16; +impl GetBindingResourceId for TextureView { + fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId { + let texture_view_dimension = match resource_type { + BindlessResourceType::Texture1d => TextureViewDimension::D1, + BindlessResourceType::Texture2d => TextureViewDimension::D2, + BindlessResourceType::Texture2dArray => TextureViewDimension::D2Array, + BindlessResourceType::Texture3d => TextureViewDimension::D3, + BindlessResourceType::TextureCube => TextureViewDimension::Cube, + BindlessResourceType::TextureCubeArray => TextureViewDimension::CubeArray, + _ => panic!("Resource type is not a texture"), + }; + BindingResourceId::TextureView(texture_view_dimension, self.id()) + } +} impl MaterialBindGroupAllocator where M: Material, { - /// Creates or recreates any bind groups that were modified this frame. + /// Creates a new [`MaterialBindGroupAllocator`] managing the data for a + /// single material. 
+ fn new(render_device: &RenderDevice) -> MaterialBindGroupAllocator { + if material_uses_bindless_resources::(render_device) { + MaterialBindGroupAllocator::Bindless(Box::new(MaterialBindGroupBindlessAllocator::new( + render_device, + ))) + } else { + MaterialBindGroupAllocator::NonBindless(Box::new( + MaterialBindGroupNonBindlessAllocator::new(), + )) + } + } + + /// Returns the slab with the given index, if one exists. + pub fn get(&self, group: MaterialBindGroupIndex) -> Option> { + match *self { + MaterialBindGroupAllocator::Bindless(ref bindless_allocator) => bindless_allocator + .get(group) + .map(|bindless_slab| MaterialSlab(MaterialSlabImpl::Bindless(bindless_slab))), + MaterialBindGroupAllocator::NonBindless(ref non_bindless_allocator) => { + non_bindless_allocator.get(group).map(|non_bindless_slab| { + MaterialSlab(MaterialSlabImpl::NonBindless(non_bindless_slab)) + }) + } + } + } + + /// Allocates an [`UnpreparedBindGroup`] and returns the resulting binding ID. + /// + /// This method should generally be preferred over + /// [`Self::allocate_prepared`], because this method supports both bindless + /// and non-bindless bind groups. Only use [`Self::allocate_prepared`] if + /// you need to prepare the bind group yourself. + pub fn allocate_unprepared( + &mut self, + unprepared_bind_group: UnpreparedBindGroup, + bind_group_layout: &BindGroupLayout, + ) -> MaterialBindingId { + match *self { + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.allocate_unprepared(unprepared_bind_group), + MaterialBindGroupAllocator::NonBindless( + ref mut material_bind_group_non_bindless_allocator, + ) => material_bind_group_non_bindless_allocator + .allocate_unprepared(unprepared_bind_group, (*bind_group_layout).clone()), + } + } + + /// Places a pre-prepared bind group into a slab. + /// + /// For bindless materials, the allocator internally manages the bind + /// groups, so calling this method will panic if this is a bindless + /// allocator. Only non-bindless allocators support this method. + /// + /// It's generally preferred to use [`Self::allocate_unprepared`], because + /// that method supports both bindless and non-bindless allocators. Only use + /// this method if you need to prepare the bind group yourself. + pub fn allocate_prepared( + &mut self, + prepared_bind_group: PreparedBindGroup, + ) -> MaterialBindingId { + match *self { + MaterialBindGroupAllocator::Bindless(_) => { + panic!( + "Bindless resources are incompatible with implementing `as_bind_group` \ + directly; implement `unprepared_bind_group` instead or disable bindless" + ) + } + MaterialBindGroupAllocator::NonBindless(ref mut non_bindless_allocator) => { + non_bindless_allocator.allocate_prepared(prepared_bind_group) + } + } + } + + /// Deallocates the material with the given binding ID. + /// + /// Any resources that are no longer referenced are removed from the slab. 
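// Illustrative sketch only, not part of the diff: the allocate/free lifecycle that
// the doc comments above describe, as used by `prepare_asset` earlier in this diff —
// free any existing binding for a re-prepared material asset, then allocate a fresh
// one. Integer stand-ins replace the real asset ID and `MaterialBindingId` types;
// the `Allocator` here is hypothetical.
use std::collections::{hash_map::Entry, HashMap};

type AssetId = u64;
type BindingId = u32;

struct Allocator {
    next: BindingId,
    freed: Vec<BindingId>,
}

impl Allocator {
    fn allocate(&mut self) -> BindingId {
        let id = self.next;
        self.next += 1;
        id
    }
    fn free(&mut self, id: BindingId) {
        self.freed.push(id);
    }
}

/// Mirrors the occupied/vacant `Entry` logic: a modified material releases its
/// old binding and receives a brand-new one; a new material simply allocates.
fn rebind(
    bindings: &mut HashMap<AssetId, BindingId>,
    allocator: &mut Allocator,
    asset: AssetId,
) -> BindingId {
    match bindings.entry(asset) {
        Entry::Occupied(mut occupied) => {
            allocator.free(*occupied.get());
            let new_binding = allocator.allocate();
            *occupied.get_mut() = new_binding;
            new_binding
        }
        Entry::Vacant(vacant) => *vacant.insert(allocator.allocate()),
    }
}

fn main() {
    let mut allocator = Allocator { next: 0, freed: Vec::new() };
    let mut bindings = HashMap::new();
    let first = rebind(&mut bindings, &mut allocator, 7);
    let second = rebind(&mut bindings, &mut allocator, 7); // asset modified: rebinds
    assert_ne!(first, second);
    assert_eq!(allocator.freed, vec![first]); // the old binding was released
}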
+ pub fn free(&mut self, material_binding_id: MaterialBindingId) { + match *self { + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.free(material_binding_id), + MaterialBindGroupAllocator::NonBindless( + ref mut material_bind_group_non_bindless_allocator, + ) => material_bind_group_non_bindless_allocator.free(material_binding_id), + } + } + + /// Recreates any bind groups corresponding to slabs that have been modified + /// since last calling [`MaterialBindGroupAllocator::prepare_bind_groups`]. pub fn prepare_bind_groups( &mut self, render_device: &RenderDevice, + fallback_bindless_resources: &FallbackBindlessResources, fallback_image: &FallbackImage, - fallback_resources: &FallbackBindlessResources, ) { - for bind_group in &mut self.bind_groups { - bind_group.rebuild_bind_group_if_necessary( + match *self { + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.prepare_bind_groups( + render_device, + fallback_bindless_resources, + fallback_image, + ), + MaterialBindGroupAllocator::NonBindless( + ref mut material_bind_group_non_bindless_allocator, + ) => material_bind_group_non_bindless_allocator.prepare_bind_groups(render_device), + } + } + + /// Uploads the contents of all buffers that this + /// [`MaterialBindGroupAllocator`] manages to the GPU. + /// + /// Non-bindless allocators don't currently manage any buffers, so this + /// method only has an effect for bindless allocators. + pub fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { + match *self { + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.write_buffers(render_device, render_queue), + MaterialBindGroupAllocator::NonBindless(_) => { + // Not applicable. + } + } + } +} + +impl MaterialBindlessIndexTable +where + M: Material, +{ + /// Creates a new [`MaterialBindlessIndexTable`] for a single slab. + fn new( + bindless_index_table_descriptor: &BindlessIndexTableDescriptor, + ) -> MaterialBindlessIndexTable { + // Preallocate space for one bindings table, so that there will always be a buffer. + let mut buffer = RetainedRawBufferVec::new(BufferUsages::STORAGE); + for _ in *bindless_index_table_descriptor.indices.start + ..*bindless_index_table_descriptor.indices.end + { + buffer.push(0); + } + + MaterialBindlessIndexTable { + buffer, + index_range: bindless_index_table_descriptor.indices.clone(), + binding_number: bindless_index_table_descriptor.binding_number, + phantom: PhantomData, + } + } + + /// Returns the bindings in the binding index table. + /// + /// If the current [`MaterialBindlessIndexTable::index_range`] is M..N, then + /// element *i* of the returned binding index table contains the slot of the + /// bindless resource with bindless index *i* + M. + fn get(&self, slot: MaterialBindGroupSlot) -> &[u32] { + let struct_size = *self.index_range.end as usize - *self.index_range.start as usize; + let start = struct_size * slot.0 as usize; + &self.buffer.values()[start..(start + struct_size)] + } + + /// Returns a single binding from the binding index table. 
+ fn get_binding( + &self, + slot: MaterialBindGroupSlot, + bindless_index: BindlessIndex, + ) -> Option { + if bindless_index < self.index_range.start || bindless_index >= self.index_range.end { + return None; + } + self.get(slot) + .get((*bindless_index - *self.index_range.start) as usize) + .copied() + } + + fn table_length(&self) -> u32 { + self.index_range.end.0 - self.index_range.start.0 + } + + /// Updates the binding index table for a single material. + /// + /// The `allocated_resource_slots` map contains a mapping from the + /// [`BindlessIndex`] of each resource that the material references to the + /// slot that that resource occupies in the appropriate binding array. This + /// method serializes that map into a binding index table that the shader + /// can read. + fn set( + &mut self, + slot: MaterialBindGroupSlot, + allocated_resource_slots: &HashMap, + ) { + let table_len = self.table_length() as usize; + let range = (slot.0 as usize * table_len)..((slot.0 as usize + 1) * table_len); + while self.buffer.len() < range.end { + self.buffer.push(0); + } + + for (&bindless_index, &resource_slot) in allocated_resource_slots { + if self.index_range.contains(&bindless_index) { + self.buffer.set( + *bindless_index + range.start as u32 - *self.index_range.start, + resource_slot, + ); + } + } + + // Mark the buffer as needing to be recreated, in case we grew it. + self.buffer.dirty = BufferDirtyState::NeedsReserve; + } + + /// Returns the [`BindGroupEntry`] for the index table itself. + fn bind_group_entry(&self) -> BindGroupEntry { + BindGroupEntry { + binding: *self.binding_number, + resource: self + .buffer + .buffer() + .expect("Bindings buffer must exist") + .as_entire_binding(), + } + } +} + +impl RetainedRawBufferVec +where + T: Pod, +{ + /// Creates a new empty [`RetainedRawBufferVec`] supporting the given + /// [`BufferUsages`]. + fn new(buffer_usages: BufferUsages) -> RetainedRawBufferVec { + RetainedRawBufferVec { + buffer: RawBufferVec::new(buffer_usages), + dirty: BufferDirtyState::NeedsUpload, + } + } + + /// Recreates the GPU backing buffer if needed. + fn prepare(&mut self, render_device: &RenderDevice) { + match self.dirty { + BufferDirtyState::Clean | BufferDirtyState::NeedsUpload => {} + BufferDirtyState::NeedsReserve => { + let capacity = self.buffer.len(); + self.buffer.reserve(capacity, render_device); + self.dirty = BufferDirtyState::NeedsUpload; + } + } + } + + /// Writes the current contents of the buffer to the GPU if necessary. + fn write(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { + match self.dirty { + BufferDirtyState::Clean => {} + BufferDirtyState::NeedsReserve | BufferDirtyState::NeedsUpload => { + self.buffer.write_buffer(render_device, render_queue); + self.dirty = BufferDirtyState::Clean; + } + } + } +} + +impl MaterialBindGroupBindlessAllocator +where + M: Material, +{ + /// Creates a new [`MaterialBindGroupBindlessAllocator`] managing the data + /// for a single bindless material. 
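A worked example of the index-table addressing implemented by `set`, `get`, and `get_binding` above, using assumed values (an index range of 2..6 and material slot 3):

    fn bindless_index_table_addressing_example() {
        let (range_start, range_end) = (2u32, 6u32);
        let table_len = range_end - range_start; // 4 entries per material slot
        let slot = 3u32;
        let slot_base = slot * table_len; // slot 3's entries occupy buffer[12..16]
        let bindless_index = 4u32;
        let buffer_offset = slot_base + (bindless_index - range_start);
        assert_eq!(buffer_offset, 14); // `get_binding` for slot 3, bindless index 4 reads buffer[14]
    }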
+ fn new(render_device: &RenderDevice) -> MaterialBindGroupBindlessAllocator { + let bindless_descriptor = M::bindless_descriptor() + .expect("Non-bindless materials should use the non-bindless allocator"); + let fallback_buffers = bindless_descriptor + .buffers + .iter() + .map(|bindless_buffer_descriptor| { + ( + bindless_buffer_descriptor.bindless_index, + render_device.create_buffer(&BufferDescriptor { + label: Some("bindless fallback buffer"), + size: match bindless_buffer_descriptor.size { + Some(size) => size as u64, + None => DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE, + }, + usage: BufferUsages::STORAGE, + mapped_at_creation: false, + }), + ) + }) + .collect(); + + MaterialBindGroupBindlessAllocator { + slabs: vec![], + bind_group_layout: M::bind_group_layout(render_device), + bindless_descriptor, + fallback_buffers, + slab_capacity: M::bindless_slot_count() + .expect("Non-bindless materials should use the non-bindless allocator") + .resolve(), + } + } + + /// Allocates the resources for a single material into a slab and returns + /// the resulting ID. + /// + /// The returned [`MaterialBindingId`] can later be used to fetch the slab + /// that was used. + /// + /// This function can't fail. If all slabs are full, then a new slab is + /// created, and the material is allocated into it. + fn allocate_unprepared( + &mut self, + mut unprepared_bind_group: UnpreparedBindGroup, + ) -> MaterialBindingId { + for (slab_index, slab) in self.slabs.iter_mut().enumerate() { + trace!("Trying to allocate in slab {}", slab_index); + match slab.try_allocate(unprepared_bind_group, self.slab_capacity) { + Ok(slot) => { + return MaterialBindingId { + group: MaterialBindGroupIndex(slab_index as u32), + slot, + }; + } + Err(bind_group) => unprepared_bind_group = bind_group, + } + } + + let group = MaterialBindGroupIndex(self.slabs.len() as u32); + self.slabs + .push(MaterialBindlessSlab::new(&self.bindless_descriptor)); + + // Allocate into the newly-pushed slab. + let Ok(slot) = self + .slabs + .last_mut() + .expect("We just pushed a slab") + .try_allocate(unprepared_bind_group, self.slab_capacity) + else { + panic!("An allocation into an empty slab should always succeed") + }; + + MaterialBindingId { group, slot } + } + + /// Deallocates the material with the given binding ID. + /// + /// Any resources that are no longer referenced are removed from the slab. + fn free(&mut self, material_binding_id: MaterialBindingId) { + self.slabs + .get_mut(material_binding_id.group.0 as usize) + .expect("Slab should exist") + .free(material_binding_id.slot, &self.bindless_descriptor); + } + + /// Returns the slab with the given bind group index. + /// + /// A [`MaterialBindGroupIndex`] can be fetched from a + /// [`MaterialBindingId`]. + fn get(&self, group: MaterialBindGroupIndex) -> Option<&MaterialBindlessSlab> { + self.slabs.get(group.0 as usize) + } + + /// Recreates any bind groups corresponding to slabs that have been modified + /// since last calling + /// [`MaterialBindGroupBindlessAllocator::prepare_bind_groups`]. + fn prepare_bind_groups( + &mut self, + render_device: &RenderDevice, + fallback_bindless_resources: &FallbackBindlessResources, + fallback_image: &FallbackImage, + ) { + for slab in &mut self.slabs { + slab.prepare( render_device, &self.bind_group_layout, - fallback_image, - fallback_resources, + fallback_bindless_resources, &self.fallback_buffers, + fallback_image, + &self.bindless_descriptor, + self.slab_capacity, ); } } - /// Returns the bind group with the given index, if it exists. 
- #[inline] - pub fn get(&self, index: MaterialBindGroupIndex) -> Option<&MaterialBindGroup> { - self.bind_groups.get(index.0 as usize) - } - - /// Allocates a new binding slot and returns its ID. - pub fn allocate(&mut self) -> MaterialBindingId { - let group_index = self.free_bind_groups.pop().unwrap_or_else(|| { - let group_index = self.bind_groups.len() as u32; - self.bind_groups - .push(MaterialBindGroup::new(self.bindless_enabled)); - group_index - }); - - let bind_group = &mut self.bind_groups[group_index as usize]; - let slot_index = bind_group.allocate(); - - if !bind_group.is_full() { - self.free_bind_groups.push(group_index); - } - - MaterialBindingId { - group: group_index.into(), - slot: slot_index, - } - } - - /// Assigns an unprepared bind group to the group and slot specified in the - /// [`MaterialBindingId`]. - pub fn init( - &mut self, - render_device: &RenderDevice, - material_binding_id: MaterialBindingId, - unprepared_bind_group: UnpreparedBindGroup, - ) { - self.bind_groups[material_binding_id.group.0 as usize].init( - render_device, - &self.bind_group_layout, - material_binding_id.slot, - unprepared_bind_group, - ); - } - - /// Fills a slot directly with a custom bind group. + /// Writes any buffers that we're managing to the GPU. /// - /// This is only a meaningful operation for non-bindless bind groups. It's - /// rarely used, but see the `texture_binding_array` example for an example - /// demonstrating how this feature might see use in practice. - pub fn init_custom( - &mut self, - material_binding_id: MaterialBindingId, - bind_group: BindGroup, - bind_group_data: M::Data, - ) { - self.bind_groups[material_binding_id.group.0 as usize] - .init_custom(bind_group, bind_group_data); - } - - /// Marks the slot corresponding to the given [`MaterialBindingId`] as free. - pub fn free(&mut self, material_binding_id: MaterialBindingId) { - let bind_group = &mut self.bind_groups[material_binding_id.group.0 as usize]; - let was_full = bind_group.is_full(); - - bind_group.free(material_binding_id.slot); - - // If the group that this material belonged to was full, it now contains - // at least one free slot, so add the group to the `free_bind_groups` - // list. - if was_full { - debug_assert!(!self.free_bind_groups.contains(&material_binding_id.group.0)); - self.free_bind_groups.push(*material_binding_id.group); - } - } -} - -impl MaterialBindGroup -where - M: Material, -{ - /// Creates a new material bind group. - fn new(bindless: bool) -> MaterialBindGroup { - if bindless { - MaterialBindGroup::Bindless(MaterialBindlessBindGroup::new()) - } else { - MaterialBindGroup::NonBindless(MaterialNonBindlessBindGroup::new()) - } - } - - /// Allocates a new binding slot and returns its ID. - fn allocate(&mut self) -> MaterialBindGroupSlot { - match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.allocate() - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.allocate() - } - } - } - - /// Assigns an unprepared bind group to the group and slot specified in the - /// [`MaterialBindingId`]. 
- fn init( - &mut self, - render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - slot: MaterialBindGroupSlot, - unprepared_bind_group: UnpreparedBindGroup, - ) { - match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.init( - render_device, - bind_group_layout, - slot, - unprepared_bind_group, - ); - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.init( - render_device, - bind_group_layout, - slot, - unprepared_bind_group, - ); - } - } - } - - /// Fills a slot directly with a custom bind group. - /// - /// This is only a meaningful operation for non-bindless bind groups. It's - /// rarely used, but see the `texture_binding_array` example for an example - /// demonstrating how this feature might see use in practice. - fn init_custom(&mut self, bind_group: BindGroup, extra_data: M::Data) { - match *self { - MaterialBindGroup::Bindless(_) => { - error!("Custom bind groups aren't supported in bindless mode"); - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.init_custom(bind_group, extra_data); - } - } - } - - /// Marks the slot corresponding to the given [`MaterialBindGroupSlot`] as - /// free. - fn free(&mut self, material_bind_group_slot: MaterialBindGroupSlot) { - match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.free(material_bind_group_slot); - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.free(material_bind_group_slot); - } - } - } - - /// Returns the actual bind group, or `None` if it hasn't been created yet. - pub fn get_bind_group(&self) -> Option<&BindGroup> { - match *self { - MaterialBindGroup::Bindless(ref material_bindless_bind_group) => { - material_bindless_bind_group.get_bind_group() - } - MaterialBindGroup::NonBindless(ref material_non_bindless_bind_group) => { - material_non_bindless_bind_group.get_bind_group() - } - } - } - - /// Returns true if all the slots are full or false if at least one slot in - /// this bind group is free. - fn is_full(&self) -> bool { - match *self { - MaterialBindGroup::Bindless(ref material_bindless_bind_group) => { - material_bindless_bind_group.is_full() - } - MaterialBindGroup::NonBindless(ref material_non_bindless_bind_group) => { - material_non_bindless_bind_group.is_full() - } - } - } - - /// Recreates the bind group for this material bind group containing the - /// data for every material in it. - fn rebuild_bind_group_if_necessary( - &mut self, - render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - fallback_image: &FallbackImage, - fallback_bindless_resources: &FallbackBindlessResources, - fallback_buffers: &MaterialFallbackBuffers, - ) { - match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.rebuild_bind_group_if_necessary( - render_device, - bind_group_layout, - fallback_image, - fallback_bindless_resources, - fallback_buffers, - ); - } - MaterialBindGroup::NonBindless(_) => {} - } - } - - /// Returns the associated extra data for the material with the given slot. 
- pub fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { - match *self { - MaterialBindGroup::Bindless(ref material_bindless_bind_group) => { - material_bindless_bind_group.get_extra_data(slot) - } - MaterialBindGroup::NonBindless(ref material_non_bindless_bind_group) => { - material_non_bindless_bind_group.get_extra_data(slot) - } - } - } -} - -impl MaterialBindlessBindGroup -where - M: Material, -{ - /// Returns a new bind group. - fn new() -> MaterialBindlessBindGroup { - let count = M::bindless_slot_count().unwrap_or(1); - - MaterialBindlessBindGroup { - bind_group: None, - unprepared_bind_groups: iter::repeat_with(|| None).take(count as usize).collect(), - used_slot_bitmap: 0, - } - } - - /// Allocates a new slot and returns its index. - /// - /// This bind group must not be full. - fn allocate(&mut self) -> MaterialBindGroupSlot { - debug_assert!(!self.is_full()); - - // Mark the slot as used. - let slot = self.used_slot_bitmap.trailing_ones(); - self.used_slot_bitmap |= 1 << slot; - - slot.into() - } - - /// Assigns the given unprepared bind group to the given slot. - fn init( - &mut self, - _: &RenderDevice, - _: &BindGroupLayout, - slot: MaterialBindGroupSlot, - unprepared_bind_group: UnpreparedBindGroup, - ) { - self.unprepared_bind_groups[slot.0 as usize] = Some(unprepared_bind_group); - - // Invalidate the cached bind group so that we rebuild it again. - self.bind_group = None; - } - - /// Marks the given slot as free. - fn free(&mut self, slot: MaterialBindGroupSlot) { - self.unprepared_bind_groups[slot.0 as usize] = None; - self.used_slot_bitmap &= !(1 << slot.0); - - // Invalidate the cached bind group so that we rebuild it again. - self.bind_group = None; - } - - /// Returns true if all the slots are full or false if at least one slot in - /// this bind group is free. - fn is_full(&self) -> bool { - self.used_slot_bitmap == (1 << (self.unprepared_bind_groups.len() as u32)) - 1 - } - - /// Returns the actual bind group, or `None` if it hasn't been created yet. - fn get_bind_group(&self) -> Option<&BindGroup> { - self.bind_group.as_ref() - } - - /// Recreates the bind group for this material bind group containing the - /// data for every material in it. - fn rebuild_bind_group_if_necessary( - &mut self, - render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - fallback_image: &FallbackImage, - fallback_bindless_resources: &FallbackBindlessResources, - fallback_buffers: &MaterialFallbackBuffers, - ) { - if self.bind_group.is_some() { - return; - } - - let Some(first_bind_group) = self - .unprepared_bind_groups - .iter() - .find_map(|slot| slot.as_ref()) - else { - return; - }; - - // Creates the intermediate binding resource vectors. - let Some(binding_resource_arrays) = self.recreate_binding_resource_arrays( - first_bind_group, - fallback_image, - fallback_bindless_resources, - fallback_buffers, - ) else { - return; - }; - - // Now build the actual resource arrays for `wgpu`. 
- let entries = binding_resource_arrays - .iter() - .map(|&(&binding, ref binding_resource_array)| BindGroupEntry { - binding, - resource: match *binding_resource_array { - BindingResourceArray::Buffers(ref vec) => { - BindingResource::BufferArray(&vec[..]) - } - BindingResourceArray::TextureViews(_, ref vec) => { - BindingResource::TextureViewArray(&vec[..]) - } - BindingResourceArray::Samplers(ref vec) => { - BindingResource::SamplerArray(&vec[..]) - } - }, - }) - .collect::>(); - - self.bind_group = - Some(render_device.create_bind_group(M::label(), bind_group_layout, &entries)); - } - - /// Recreates the binding arrays for each material in this bind group. - fn recreate_binding_resource_arrays<'a>( - &'a self, - first_bind_group: &'a UnpreparedBindGroup, - fallback_image: &'a FallbackImage, - fallback_bindless_resources: &'a FallbackBindlessResources, - fallback_buffers: &'a MaterialFallbackBuffers, - ) -> Option)>> { - // Initialize the arrays. - let mut binding_resource_arrays = first_bind_group - .bindings - .iter() - .map(|(index, binding)| match *binding { - OwnedBindingResource::Buffer(..) => (index, BindingResourceArray::Buffers(vec![])), - OwnedBindingResource::TextureView(dimension, _) => { - (index, BindingResourceArray::TextureViews(dimension, vec![])) - } - OwnedBindingResource::Sampler(..) => { - (index, BindingResourceArray::Samplers(vec![])) - } - }) - .collect::>(); - - for maybe_unprepared_bind_group in self.unprepared_bind_groups.iter() { - match *maybe_unprepared_bind_group { - None => { - // Push dummy resources for this slot. - for binding_resource_array in &mut binding_resource_arrays { - match *binding_resource_array { - (binding, BindingResourceArray::Buffers(ref mut vec)) => { - vec.push(BufferBinding { - buffer: &fallback_buffers.0[binding], - offset: 0, - size: None, - }); - } - ( - _, - BindingResourceArray::TextureViews(texture_dimension, ref mut vec), - ) => vec.push(&fallback_image.get(texture_dimension).texture_view), - (_, BindingResourceArray::Samplers(ref mut vec)) => { - vec.push(&fallback_bindless_resources.fallback_sampler); - } - } - } - } - - Some(ref unprepared_bind_group) => { - // Push the resources for this slot. - // - // All materials in this group must have the same type of - // binding (buffer, texture view, sampler) in each bind - // group entry. 
- for ( - binding_index, - (&mut (binding, ref mut binding_resource_array), (_, binding_resource)), - ) in binding_resource_arrays - .iter_mut() - .zip(unprepared_bind_group.bindings.0.iter()) - .enumerate() - { - match (binding_resource_array, binding_resource) { - ( - &mut BindingResourceArray::Buffers(ref mut vec), - OwnedBindingResource::Buffer(buffer), - ) => match NonZero::new(buffer.size()) { - None => vec.push(BufferBinding { - buffer: &fallback_buffers.0[binding], - offset: 0, - size: None, - }), - Some(size) => vec.push(BufferBinding { - buffer, - offset: 0, - size: Some(size), - }), - }, - ( - &mut BindingResourceArray::TextureViews(_, ref mut vec), - OwnedBindingResource::TextureView(_, texture_view), - ) => vec.push(texture_view), - ( - &mut BindingResourceArray::Samplers(ref mut vec), - OwnedBindingResource::Sampler(sampler), - ) => vec.push(sampler), - _ => { - error!( - "Mismatched bind group layouts for material \ - {} at bind group {}; can't combine bind \ - groups into a single bindless bind group!", - any::type_name::(), - binding_index, - ); - return None; - } - } - } - } - } - } - - Some(binding_resource_arrays) - } - - /// Returns the associated extra data for the material with the given slot. - fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { - &self.unprepared_bind_groups[slot.0 as usize] - .as_ref() - .unwrap() - .data - } -} - -impl MaterialNonBindlessBindGroup -where - M: Material, -{ - /// Creates a new material bind group. - fn new() -> MaterialNonBindlessBindGroup { - MaterialNonBindlessBindGroup { - allocation: MaterialNonBindlessBindGroupAllocation::Unallocated, - } - } - - /// Allocates a new slot and returns its index. - /// - /// This bind group must not be full. - fn allocate(&mut self) -> MaterialBindGroupSlot { - debug_assert!(!self.is_full()); - self.allocation = MaterialNonBindlessBindGroupAllocation::Allocated; - MaterialBindGroupSlot(0) - } - - /// Assigns an unprepared bind group to the group and slot specified in the - /// [`MaterialBindingId`]. - /// - /// For non-bindless bind groups, we go ahead and create the bind group - /// immediately. - fn init( - &mut self, - render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - _: MaterialBindGroupSlot, - unprepared_bind_group: UnpreparedBindGroup, - ) { - let entries = unprepared_bind_group - .bindings - .iter() - .map(|(index, binding)| BindGroupEntry { - binding: *index, - resource: binding.get_binding(), - }) - .collect::>(); - - self.allocation = MaterialNonBindlessBindGroupAllocation::Initialized( - render_device.create_bind_group(M::label(), bind_group_layout, &entries), - unprepared_bind_group.data, - ); - } - - /// Fills the slot directly with a custom bind group. - /// - /// This is only a meaningful operation for non-bindless bind groups. It's - /// rarely used, but see the `texture_binding_array` example for an example - /// demonstrating how this feature might see use in practice. - fn init_custom(&mut self, bind_group: BindGroup, extra_data: M::Data) { - self.allocation = - MaterialNonBindlessBindGroupAllocation::Initialized(bind_group, extra_data); - } - - /// Deletes the stored bind group. - fn free(&mut self, _: MaterialBindGroupSlot) { - self.allocation = MaterialNonBindlessBindGroupAllocation::Unallocated; - } - - /// Returns true if the slot is full or false if it's free. 
- fn is_full(&self) -> bool { - !matches!( - self.allocation, - MaterialNonBindlessBindGroupAllocation::Unallocated - ) - } - - /// Returns the actual bind group, or `None` if it hasn't been created yet. - fn get_bind_group(&self) -> Option<&BindGroup> { - match self.allocation { - MaterialNonBindlessBindGroupAllocation::Unallocated - | MaterialNonBindlessBindGroupAllocation::Allocated => None, - MaterialNonBindlessBindGroupAllocation::Initialized(ref bind_group, _) => { - Some(bind_group) - } - } - } - - /// Returns the associated extra data for the material. - fn get_extra_data(&self, _: MaterialBindGroupSlot) -> &M::Data { - match self.allocation { - MaterialNonBindlessBindGroupAllocation::Initialized(_, ref extra_data) => extra_data, - MaterialNonBindlessBindGroupAllocation::Unallocated - | MaterialNonBindlessBindGroupAllocation::Allocated => { - panic!("Bind group not initialized") - } + /// Currently, this only consists of the bindless index tables. + fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { + for slab in &mut self.slabs { + slab.write_buffer(render_device, render_queue); } } } @@ -769,20 +883,798 @@ where M: Material, { fn from_world(world: &mut World) -> Self { - // Create a new bind group allocator. let render_device = world.resource::(); - let bind_group_layout_entries = M::bind_group_layout_entries(render_device, false); - let bind_group_layout = - render_device.create_bind_group_layout(M::label(), &bind_group_layout_entries); - let fallback_buffers = - MaterialFallbackBuffers::new(render_device, &bind_group_layout_entries); - MaterialBindGroupAllocator { - bind_groups: vec![], - free_bind_groups: vec![], + MaterialBindGroupAllocator::new(render_device) + } +} + +impl MaterialBindlessSlab +where + M: Material, +{ + /// Attempts to allocate the given unprepared bind group in this slab. + /// + /// If the allocation succeeds, this method returns the slot that the + /// allocation was placed in. If the allocation fails because the slab was + /// full, this method returns the unprepared bind group back to the caller + /// so that it can try to allocate again. + fn try_allocate( + &mut self, + unprepared_bind_group: UnpreparedBindGroup, + slot_capacity: u32, + ) -> Result> { + // Locate pre-existing resources, and determine how many free slots we need. + let Some(allocation_candidate) = self.check_allocation(&unprepared_bind_group) else { + return Err(unprepared_bind_group); + }; + + // Check to see if we have enough free space. + // + // As a special case, note that if *nothing* is allocated in this slab, + // then we always allow a material to be placed in it, regardless of the + // number of bindings the material has. This is so that, if the + // platform's maximum bindless count is set too low to hold even a + // single material, we can still place each material into a separate + // slab instead of failing outright. + if self.allocated_resource_count > 0 + && self.allocated_resource_count + allocation_candidate.needed_free_slots + > slot_capacity + { + trace!("Slab is full, can't allocate"); + return Err(unprepared_bind_group); + } + + // OK, we can allocate in this slab. Assign a slot ID. + let slot = self + .free_slots + .pop() + .unwrap_or(MaterialBindGroupSlot(self.live_allocation_count)); + + // Bump the live allocation count. + self.live_allocation_count += 1; + + // Insert the resources into the binding arrays. 
+ let allocated_resource_slots = + self.insert_resources(unprepared_bind_group.bindings, allocation_candidate); + + // Serialize the allocated resource slots. + for bindless_index_table in &mut self.bindless_index_tables { + bindless_index_table.set(slot, &allocated_resource_slots); + } + + // Insert extra data. + if self.extra_data.len() < (*slot as usize + 1) { + self.extra_data.resize_with(*slot as usize + 1, || None); + } + self.extra_data[*slot as usize] = Some(unprepared_bind_group.data); + + // Invalidate the cached bind group. + self.bind_group = None; + + Ok(slot) + } + + /// Gathers the information needed to determine whether the given unprepared + /// bind group can be allocated in this slab. + fn check_allocation( + &self, + unprepared_bind_group: &UnpreparedBindGroup, + ) -> Option { + let mut allocation_candidate = BindlessAllocationCandidate { + pre_existing_resources: HashMap::default(), + needed_free_slots: 0, + }; + + for &(bindless_index, ref owned_binding_resource) in unprepared_bind_group.bindings.iter() { + let bindless_index = BindlessIndex(bindless_index); + match *owned_binding_resource { + OwnedBindingResource::Buffer(ref buffer) => { + let Some(binding_array) = self.buffers.get(&bindless_index) else { + error!( + "Binding array wasn't present for buffer at index {:?}", + bindless_index + ); + return None; + }; + match binding_array.find(BindingResourceId::Buffer(buffer.id())) { + Some(slot) => { + allocation_candidate + .pre_existing_resources + .insert(bindless_index, slot); + } + None => allocation_candidate.needed_free_slots += 1, + } + } + + OwnedBindingResource::Data(_) => { + // The size of a data buffer is unlimited. + } + + OwnedBindingResource::TextureView(texture_view_dimension, ref texture_view) => { + let bindless_resource_type = BindlessResourceType::from(texture_view_dimension); + match self + .textures + .get(&bindless_resource_type) + .expect("Missing binding array for texture") + .find(BindingResourceId::TextureView( + texture_view_dimension, + texture_view.id(), + )) { + Some(slot) => { + allocation_candidate + .pre_existing_resources + .insert(bindless_index, slot); + } + None => { + allocation_candidate.needed_free_slots += 1; + } + } + } + + OwnedBindingResource::Sampler(sampler_binding_type, ref sampler) => { + let bindless_resource_type = BindlessResourceType::from(sampler_binding_type); + match self + .samplers + .get(&bindless_resource_type) + .expect("Missing binding array for sampler") + .find(BindingResourceId::Sampler(sampler.id())) + { + Some(slot) => { + allocation_candidate + .pre_existing_resources + .insert(bindless_index, slot); + } + None => { + allocation_candidate.needed_free_slots += 1; + } + } + } + } + } + + Some(allocation_candidate) + } + + /// Inserts the given [`BindingResources`] into this slab. + /// + /// Returns a table that maps the bindless index of each resource to its + /// slot in its binding array. + fn insert_resources( + &mut self, + mut binding_resources: BindingResources, + allocation_candidate: BindlessAllocationCandidate, + ) -> HashMap { + let mut allocated_resource_slots = HashMap::default(); + + for (bindless_index, owned_binding_resource) in binding_resources.drain(..) { + let bindless_index = BindlessIndex(bindless_index); + // If this is an other reference to an object we've already + // allocated, just bump its reference count. 
+ if let Some(pre_existing_resource_slot) = allocation_candidate + .pre_existing_resources + .get(&bindless_index) + { + allocated_resource_slots.insert(bindless_index, *pre_existing_resource_slot); + + match owned_binding_resource { + OwnedBindingResource::Buffer(_) => { + self.buffers + .get_mut(&bindless_index) + .expect("Buffer binding array should exist") + .bindings + .get_mut(*pre_existing_resource_slot as usize) + .and_then(|binding| binding.as_mut()) + .expect("Slot should exist") + .ref_count += 1; + } + + OwnedBindingResource::Data(_) => { + panic!("Data buffers can't be deduplicated") + } + + OwnedBindingResource::TextureView(texture_view_dimension, _) => { + let bindless_resource_type = + BindlessResourceType::from(texture_view_dimension); + self.textures + .get_mut(&bindless_resource_type) + .expect("Texture binding array should exist") + .bindings + .get_mut(*pre_existing_resource_slot as usize) + .and_then(|binding| binding.as_mut()) + .expect("Slot should exist") + .ref_count += 1; + } + + OwnedBindingResource::Sampler(sampler_binding_type, _) => { + let bindless_resource_type = + BindlessResourceType::from(sampler_binding_type); + self.samplers + .get_mut(&bindless_resource_type) + .expect("Sampler binding array should exist") + .bindings + .get_mut(*pre_existing_resource_slot as usize) + .and_then(|binding| binding.as_mut()) + .expect("Slot should exist") + .ref_count += 1; + } + } + + continue; + } + + // Otherwise, we need to insert it anew. + let binding_resource_id = BindingResourceId::from(&owned_binding_resource); + match owned_binding_resource { + OwnedBindingResource::Buffer(buffer) => { + let slot = self + .buffers + .get_mut(&bindless_index) + .expect("Buffer binding array should exist") + .insert(binding_resource_id, buffer); + allocated_resource_slots.insert(bindless_index, slot); + } + OwnedBindingResource::Data(data) => { + let slot = self + .data_buffers + .get_mut(&bindless_index) + .expect("Data buffer binding array should exist") + .insert(&data); + allocated_resource_slots.insert(bindless_index, slot); + } + OwnedBindingResource::TextureView(texture_view_dimension, texture_view) => { + let bindless_resource_type = BindlessResourceType::from(texture_view_dimension); + let slot = self + .textures + .get_mut(&bindless_resource_type) + .expect("Texture array should exist") + .insert(binding_resource_id, texture_view); + allocated_resource_slots.insert(bindless_index, slot); + } + OwnedBindingResource::Sampler(sampler_binding_type, sampler) => { + let bindless_resource_type = BindlessResourceType::from(sampler_binding_type); + let slot = self + .samplers + .get_mut(&bindless_resource_type) + .expect("Sampler should exist") + .insert(binding_resource_id, sampler); + allocated_resource_slots.insert(bindless_index, slot); + } + } + + // Bump the allocated resource count. + self.allocated_resource_count += 1; + } + + allocated_resource_slots + } + + /// Removes the material allocated in the given slot, with the given + /// descriptor, from this slab. + fn free(&mut self, slot: MaterialBindGroupSlot, bindless_descriptor: &BindlessDescriptor) { + // Loop through each binding. 
+ for (bindless_index, bindless_resource_type) in + bindless_descriptor.resources.iter().enumerate() + { + let bindless_index = BindlessIndex::from(bindless_index as u32); + let Some(bindless_index_table) = self.get_bindless_index_table(bindless_index) else { + continue; + }; + let Some(bindless_binding) = bindless_index_table.get_binding(slot, bindless_index) + else { + continue; + }; + + // Free the binding. If the resource in question was anything other + // than a data buffer, then it has a reference count and + // consequently we need to decrement it. + let decrement_allocated_resource_count = match *bindless_resource_type { + BindlessResourceType::None => false, + BindlessResourceType::Buffer => self + .buffers + .get_mut(&bindless_index) + .expect("Buffer should exist with that bindless index") + .remove(bindless_binding), + BindlessResourceType::DataBuffer => { + self.data_buffers + .get_mut(&bindless_index) + .expect("Data buffer should exist with that bindless index") + .remove(bindless_binding); + false + } + BindlessResourceType::SamplerFiltering + | BindlessResourceType::SamplerNonFiltering + | BindlessResourceType::SamplerComparison => self + .samplers + .get_mut(bindless_resource_type) + .expect("Sampler array should exist") + .remove(bindless_binding), + BindlessResourceType::Texture1d + | BindlessResourceType::Texture2d + | BindlessResourceType::Texture2dArray + | BindlessResourceType::Texture3d + | BindlessResourceType::TextureCube + | BindlessResourceType::TextureCubeArray => self + .textures + .get_mut(bindless_resource_type) + .expect("Texture array should exist") + .remove(bindless_binding), + }; + + // If the slot is now free, decrement the allocated resource + // count. + if decrement_allocated_resource_count { + self.allocated_resource_count -= 1; + } + } + + // Clear out the extra data. + self.extra_data[slot.0 as usize] = None; + + // Invalidate the cached bind group. + self.bind_group = None; + + // Release the slot ID. + self.free_slots.push(slot); + self.live_allocation_count -= 1; + } + + /// Recreates the bind group and bindless index table buffer if necessary. + fn prepare( + &mut self, + render_device: &RenderDevice, + bind_group_layout: &BindGroupLayout, + fallback_bindless_resources: &FallbackBindlessResources, + fallback_buffers: &HashMap, + fallback_image: &FallbackImage, + bindless_descriptor: &BindlessDescriptor, + slab_capacity: u32, + ) { + // Create the bindless index table buffers if needed. + for bindless_index_table in &mut self.bindless_index_tables { + bindless_index_table.buffer.prepare(render_device); + } + + // Create any data buffers we were managing if necessary. + for data_buffer in self.data_buffers.values_mut() { + data_buffer.buffer.prepare(render_device); + } + + // Create the bind group if needed. + self.prepare_bind_group( + render_device, bind_group_layout, + fallback_bindless_resources, fallback_buffers, - bindless_enabled: material_uses_bindless_resources::(render_device), - phantom: PhantomData, + fallback_image, + bindless_descriptor, + slab_capacity, + ); + } + + /// Recreates the bind group if this slab has been changed since the last + /// time we created it. + fn prepare_bind_group( + &mut self, + render_device: &RenderDevice, + bind_group_layout: &BindGroupLayout, + fallback_bindless_resources: &FallbackBindlessResources, + fallback_buffers: &HashMap, + fallback_image: &FallbackImage, + bindless_descriptor: &BindlessDescriptor, + slab_capacity: u32, + ) { + // If the bind group is clean, then do nothing. 
+ if self.bind_group.is_some() { + return; + } + + // Determine whether we need to pad out our binding arrays with dummy + // resources. + let required_binding_array_size = if render_device + .features() + .contains(WgpuFeatures::PARTIALLY_BOUND_BINDING_ARRAY) + { + None + } else { + Some(slab_capacity) + }; + + let binding_resource_arrays = self.create_binding_resource_arrays( + fallback_bindless_resources, + fallback_buffers, + fallback_image, + bindless_descriptor, + required_binding_array_size, + ); + + let mut bind_group_entries: Vec<_> = self + .bindless_index_tables + .iter() + .map(|bindless_index_table| bindless_index_table.bind_group_entry()) + .collect(); + + for &(&binding, ref binding_resource_array) in binding_resource_arrays.iter() { + bind_group_entries.push(BindGroupEntry { + binding, + resource: match *binding_resource_array { + BindingResourceArray::Buffers(ref buffer_bindings) => { + BindingResource::BufferArray(&buffer_bindings[..]) + } + BindingResourceArray::TextureViews(ref texture_views) => { + BindingResource::TextureViewArray(&texture_views[..]) + } + BindingResourceArray::Samplers(ref samplers) => { + BindingResource::SamplerArray(&samplers[..]) + } + }, + }); + } + + // Create bind group entries for any data buffers we're managing. + for data_buffer in self.data_buffers.values() { + bind_group_entries.push(BindGroupEntry { + binding: *data_buffer.binding_number, + resource: data_buffer + .buffer + .buffer() + .expect("Backing data buffer must have been uploaded by now") + .as_entire_binding(), + }); + } + + self.bind_group = Some(render_device.create_bind_group( + M::label(), + bind_group_layout, + &bind_group_entries, + )); + } + + /// Writes any buffers that we're managing to the GPU. + /// + /// Currently, this consists of the bindless index table plus any data + /// buffers we're managing. + fn write_buffer(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { + for bindless_index_table in &mut self.bindless_index_tables { + bindless_index_table + .buffer + .write(render_device, render_queue); + } + + for data_buffer in self.data_buffers.values_mut() { + data_buffer.buffer.write(render_device, render_queue); + } + } + + /// Converts our binding arrays into binding resource arrays suitable for + /// passing to `wgpu`. + fn create_binding_resource_arrays<'a>( + &'a self, + fallback_bindless_resources: &'a FallbackBindlessResources, + fallback_buffers: &'a HashMap, + fallback_image: &'a FallbackImage, + bindless_descriptor: &'a BindlessDescriptor, + required_binding_array_size: Option, + ) -> Vec<(&'a u32, BindingResourceArray<'a>)> { + let mut binding_resource_arrays = vec![]; + + // Build sampler bindings. + self.create_sampler_binding_resource_arrays( + &mut binding_resource_arrays, + fallback_bindless_resources, + required_binding_array_size, + ); + + // Build texture bindings. + self.create_texture_binding_resource_arrays( + &mut binding_resource_arrays, + fallback_image, + required_binding_array_size, + ); + + // Build buffer bindings. + self.create_buffer_binding_resource_arrays( + &mut binding_resource_arrays, + fallback_buffers, + bindless_descriptor, + required_binding_array_size, + ); + + binding_resource_arrays + } + + /// Accumulates sampler binding arrays into binding resource arrays suitable + /// for passing to `wgpu`. 
+ fn create_sampler_binding_resource_arrays<'a, 'b>( + &'a self, + binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>, + fallback_bindless_resources: &'a FallbackBindlessResources, + required_binding_array_size: Option, + ) { + // We have one binding resource array per sampler type. + for (bindless_resource_type, fallback_sampler) in [ + ( + BindlessResourceType::SamplerFiltering, + &fallback_bindless_resources.filtering_sampler, + ), + ( + BindlessResourceType::SamplerNonFiltering, + &fallback_bindless_resources.non_filtering_sampler, + ), + ( + BindlessResourceType::SamplerComparison, + &fallback_bindless_resources.comparison_sampler, + ), + ] { + let mut sampler_bindings = vec![]; + + match self.samplers.get(&bindless_resource_type) { + Some(sampler_bindless_binding_array) => { + for maybe_bindless_binding in sampler_bindless_binding_array.bindings.iter() { + match *maybe_bindless_binding { + Some(ref bindless_binding) => { + sampler_bindings.push(&*bindless_binding.resource); + } + None => sampler_bindings.push(&**fallback_sampler), + } + } + } + + None => { + // Fill with a single fallback sampler. + sampler_bindings.push(&**fallback_sampler); + } + } + + if let Some(required_binding_array_size) = required_binding_array_size { + sampler_bindings.extend(iter::repeat_n( + &**fallback_sampler, + required_binding_array_size as usize - sampler_bindings.len(), + )); + } + + let binding_number = bindless_resource_type + .binding_number() + .expect("Sampler bindless resource type must have a binding number"); + + binding_resource_arrays.push(( + &**binding_number, + BindingResourceArray::Samplers(sampler_bindings), + )); + } + } + + /// Accumulates texture binding arrays into binding resource arrays suitable + /// for passing to `wgpu`. + fn create_texture_binding_resource_arrays<'a, 'b>( + &'a self, + binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>, + fallback_image: &'a FallbackImage, + required_binding_array_size: Option, + ) { + for (bindless_resource_type, fallback_image) in [ + (BindlessResourceType::Texture1d, &fallback_image.d1), + (BindlessResourceType::Texture2d, &fallback_image.d2), + ( + BindlessResourceType::Texture2dArray, + &fallback_image.d2_array, + ), + (BindlessResourceType::Texture3d, &fallback_image.d3), + (BindlessResourceType::TextureCube, &fallback_image.cube), + ( + BindlessResourceType::TextureCubeArray, + &fallback_image.cube_array, + ), + ] { + let mut texture_bindings = vec![]; + + let binding_number = bindless_resource_type + .binding_number() + .expect("Texture bindless resource type must have a binding number"); + + match self.textures.get(&bindless_resource_type) { + Some(texture_bindless_binding_array) => { + for maybe_bindless_binding in texture_bindless_binding_array.bindings.iter() { + match *maybe_bindless_binding { + Some(ref bindless_binding) => { + texture_bindings.push(&*bindless_binding.resource); + } + None => texture_bindings.push(&*fallback_image.texture_view), + } + } + } + + None => { + // Fill with a single fallback image. 
+ texture_bindings.push(&*fallback_image.texture_view); + } + } + + if let Some(required_binding_array_size) = required_binding_array_size { + texture_bindings.extend(iter::repeat_n( + &*fallback_image.texture_view, + required_binding_array_size as usize - texture_bindings.len(), + )); + } + + binding_resource_arrays.push(( + binding_number, + BindingResourceArray::TextureViews(texture_bindings), + )); + } + } + + /// Accumulates buffer binding arrays into binding resource arrays suitable + /// for `wgpu`. + fn create_buffer_binding_resource_arrays<'a, 'b>( + &'a self, + binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>, + fallback_buffers: &'a HashMap, + bindless_descriptor: &'a BindlessDescriptor, + required_binding_array_size: Option, + ) { + for bindless_buffer_descriptor in bindless_descriptor.buffers.iter() { + let Some(buffer_bindless_binding_array) = + self.buffers.get(&bindless_buffer_descriptor.bindless_index) + else { + // This is OK, because index buffers are present in + // `BindlessDescriptor::buffers` but not in + // `BindlessDescriptor::resources`. + continue; + }; + + let fallback_buffer = fallback_buffers + .get(&bindless_buffer_descriptor.bindless_index) + .expect("Fallback buffer should exist"); + + let mut buffer_bindings: Vec<_> = buffer_bindless_binding_array + .bindings + .iter() + .map(|maybe_bindless_binding| { + let buffer = match *maybe_bindless_binding { + None => fallback_buffer, + Some(ref bindless_binding) => &bindless_binding.resource, + }; + BufferBinding { + buffer, + offset: 0, + size: None, + } + }) + .collect(); + + if let Some(required_binding_array_size) = required_binding_array_size { + buffer_bindings.extend(iter::repeat_n( + BufferBinding { + buffer: fallback_buffer, + offset: 0, + size: None, + }, + required_binding_array_size as usize - buffer_bindings.len(), + )); + } + + binding_resource_arrays.push(( + &*buffer_bindless_binding_array.binding_number, + BindingResourceArray::Buffers(buffer_bindings), + )); + } + } + + /// Returns the [`BindGroup`] corresponding to this slab, if it's been + /// prepared. + fn bind_group(&self) -> Option<&BindGroup> { + self.bind_group.as_ref() + } + + /// Returns the extra data associated with this material. + fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { + self.extra_data + .get(slot.0 as usize) + .and_then(|data| data.as_ref()) + .expect("Extra data not present") + } + + /// Returns the bindless index table containing the given bindless index. + fn get_bindless_index_table( + &self, + bindless_index: BindlessIndex, + ) -> Option<&MaterialBindlessIndexTable> { + let table_index = self + .bindless_index_tables + .binary_search_by(|bindless_index_table| { + if bindless_index < bindless_index_table.index_range.start { + Ordering::Less + } else if bindless_index >= bindless_index_table.index_range.end { + Ordering::Greater + } else { + Ordering::Equal + } + }) + .ok()?; + self.bindless_index_tables.get(table_index) + } +} + +impl MaterialBindlessBindingArray +where + R: GetBindingResourceId, +{ + /// Creates a new [`MaterialBindlessBindingArray`] with the given binding + /// number, managing resources of the given type. 
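A standalone sketch of the padding rule used by the binding-array builders above: when the device lacks `PARTIALLY_BOUND_BINDING_ARRAY`, each array is topped up with its fallback resource until it reaches the slab capacity. The strings and the capacity of 4 below are stand-ins, not values from the patch.

    fn binding_array_padding_example() {
        // Device capability and slab capacity are assumed values.
        let partially_bound_supported = false;
        let slab_capacity = 4usize;
        let required = (!partially_bound_supported).then_some(slab_capacity);

        // One real sampler is bound; the rest of the array is filled with the fallback.
        let mut sampler_bindings = vec!["material sampler"];
        if let Some(required_len) = required {
            sampler_bindings.extend(std::iter::repeat_n(
                "fallback sampler",
                required_len - sampler_bindings.len(),
            ));
        }
        assert_eq!(sampler_bindings.len(), 4);
    }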
+ fn new( + binding_number: BindingNumber, + resource_type: BindlessResourceType, + ) -> MaterialBindlessBindingArray { + MaterialBindlessBindingArray { + binding_number, + bindings: vec![], + resource_type, + resource_to_slot: HashMap::default(), + free_slots: vec![], + len: 0, + } + } + + /// Returns the slot corresponding to the given resource, if that resource + /// is located in this binding array. + /// + /// If the resource isn't in this binding array, this method returns `None`. + fn find(&self, binding_resource_id: BindingResourceId) -> Option { + self.resource_to_slot.get(&binding_resource_id).copied() + } + + /// Inserts a bindless resource into a binding array and returns the index + /// of the slot it was inserted into. + fn insert(&mut self, binding_resource_id: BindingResourceId, resource: R) -> u32 { + let slot = self.free_slots.pop().unwrap_or(self.len); + self.resource_to_slot.insert(binding_resource_id, slot); + + if self.bindings.len() < slot as usize + 1 { + self.bindings.resize_with(slot as usize + 1, || None); + } + self.bindings[slot as usize] = Some(MaterialBindlessBinding::new(resource)); + + self.len += 1; + slot + } + + /// Removes a reference to an object from the slot. + /// + /// If the reference count dropped to 0 and the object was freed, this + /// method returns true. If the object was still referenced after removing + /// it, returns false. + fn remove(&mut self, slot: u32) -> bool { + let maybe_binding = &mut self.bindings[slot as usize]; + let binding = maybe_binding + .as_mut() + .expect("Attempted to free an already-freed binding"); + + binding.ref_count -= 1; + if binding.ref_count != 0 { + return false; + } + + let binding_resource_id = binding.resource.binding_resource_id(self.resource_type); + self.resource_to_slot.remove(&binding_resource_id); + + *maybe_binding = None; + self.free_slots.push(slot); + self.len -= 1; + true + } +} + +impl MaterialBindlessBinding +where + R: GetBindingResourceId, +{ + /// Creates a new [`MaterialBindlessBinding`] for a freshly-added resource. + /// + /// The reference count is initialized to 1. + fn new(resource: R) -> MaterialBindlessBinding { + MaterialBindlessBinding { + resource, + ref_count: 1, } } } @@ -796,54 +1688,403 @@ pub fn material_uses_bindless_resources(render_device: &RenderDevice) -> bool where M: Material, { - M::bindless_slot_count().is_some() && M::bindless_supported(render_device) + M::bindless_slot_count().is_some_and(|bindless_slot_count| { + M::bindless_supported(render_device) && bindless_slot_count.resolve() > 1 + }) +} + +impl MaterialBindlessSlab +where + M: Material, +{ + /// Creates a new [`MaterialBindlessSlab`] for a material with the given + /// bindless descriptor. + /// + /// We use this when no existing slab could hold a material to be allocated. 
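A simplified, self-contained model (not the real `MaterialBindlessBindingArray` type) of the reference-counting contract above: a lookup hit bumps the existing slot's count, a miss allocates a slot, and a release only frees the slot once the last reference is gone, which is the signal for the slab to decrement its allocated-resource count.

    use std::collections::HashMap;

    struct RefCountedSlots {
        slots: Vec<Option<(u64, u32)>>, // (resource id, reference count)
        slot_by_id: HashMap<u64, u32>,
        free_slots: Vec<u32>,
    }

    impl RefCountedSlots {
        // Mirrors the `find`-then-bump / `insert` split above.
        fn acquire(&mut self, id: u64) -> u32 {
            if let Some(&slot) = self.slot_by_id.get(&id) {
                self.slots[slot as usize].as_mut().unwrap().1 += 1;
                return slot;
            }
            let slot = self.free_slots.pop().unwrap_or(self.slots.len() as u32);
            if self.slots.len() <= slot as usize {
                self.slots.resize_with(slot as usize + 1, || None);
            }
            self.slots[slot as usize] = Some((id, 1));
            self.slot_by_id.insert(id, slot);
            slot
        }

        // Mirrors `remove` above: returns `true` only when the last reference is released.
        fn release(&mut self, slot: u32) -> bool {
            let entry = self.slots[slot as usize].as_mut().unwrap();
            entry.1 -= 1;
            if entry.1 > 0 {
                return false;
            }
            let id = entry.0;
            self.slot_by_id.remove(&id);
            self.slots[slot as usize] = None;
            self.free_slots.push(slot);
            true
        }
    }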
+ fn new(bindless_descriptor: &BindlessDescriptor) -> MaterialBindlessSlab { + let mut buffers = HashMap::default(); + let mut samplers = HashMap::default(); + let mut textures = HashMap::default(); + let mut data_buffers = HashMap::default(); + + for (bindless_index, bindless_resource_type) in + bindless_descriptor.resources.iter().enumerate() + { + let bindless_index = BindlessIndex(bindless_index as u32); + match *bindless_resource_type { + BindlessResourceType::None => {} + BindlessResourceType::Buffer => { + let binding_number = bindless_descriptor + .buffers + .iter() + .find(|bindless_buffer_descriptor| { + bindless_buffer_descriptor.bindless_index == bindless_index + }) + .expect( + "Bindless buffer descriptor matching that bindless index should be \ + present", + ) + .binding_number; + buffers.insert( + bindless_index, + MaterialBindlessBindingArray::new(binding_number, *bindless_resource_type), + ); + } + BindlessResourceType::DataBuffer => { + // Copy the data in. + let buffer_descriptor = bindless_descriptor + .buffers + .iter() + .find(|bindless_buffer_descriptor| { + bindless_buffer_descriptor.bindless_index == bindless_index + }) + .expect( + "Bindless buffer descriptor matching that bindless index should be \ + present", + ); + data_buffers.insert( + bindless_index, + MaterialDataBuffer::new( + buffer_descriptor.binding_number, + buffer_descriptor + .size + .expect("Data buffers should have a size") + as u32, + ), + ); + } + BindlessResourceType::SamplerFiltering + | BindlessResourceType::SamplerNonFiltering + | BindlessResourceType::SamplerComparison => { + samplers.insert( + *bindless_resource_type, + MaterialBindlessBindingArray::new( + *bindless_resource_type.binding_number().unwrap(), + *bindless_resource_type, + ), + ); + } + BindlessResourceType::Texture1d + | BindlessResourceType::Texture2d + | BindlessResourceType::Texture2dArray + | BindlessResourceType::Texture3d + | BindlessResourceType::TextureCube + | BindlessResourceType::TextureCubeArray => { + textures.insert( + *bindless_resource_type, + MaterialBindlessBindingArray::new( + *bindless_resource_type.binding_number().unwrap(), + *bindless_resource_type, + ), + ); + } + } + } + + let bindless_index_tables = bindless_descriptor + .index_tables + .iter() + .map(|bindless_index_table| MaterialBindlessIndexTable::new(bindless_index_table)) + .collect(); + + MaterialBindlessSlab { + bind_group: None, + bindless_index_tables, + samplers, + textures, + buffers, + data_buffers, + extra_data: vec![], + free_slots: vec![], + live_allocation_count: 0, + allocated_resource_count: 0, + } + } } impl FromWorld for FallbackBindlessResources { fn from_world(world: &mut World) -> Self { let render_device = world.resource::(); FallbackBindlessResources { - fallback_sampler: render_device.create_sampler(&SamplerDescriptor { - label: Some("fallback sampler"), + filtering_sampler: render_device.create_sampler(&SamplerDescriptor { + label: Some("fallback filtering sampler"), + ..default() + }), + non_filtering_sampler: render_device.create_sampler(&SamplerDescriptor { + label: Some("fallback non-filtering sampler"), + mag_filter: FilterMode::Nearest, + min_filter: FilterMode::Nearest, + mipmap_filter: FilterMode::Nearest, + ..default() + }), + comparison_sampler: render_device.create_sampler(&SamplerDescriptor { + label: Some("fallback comparison sampler"), + compare: Some(CompareFunction::Always), ..default() }), } } } -impl MaterialFallbackBuffers { - /// Creates a new set of fallback buffers containing dummy allocations. 
- /// - /// We populate unused bind group slots with these. - fn new( - render_device: &RenderDevice, - bind_group_layout_entries: &[BindGroupLayoutEntry], - ) -> MaterialFallbackBuffers { - let mut fallback_buffers = HashMap::default(); - for bind_group_layout_entry in bind_group_layout_entries { - // Create a dummy buffer of the appropriate size. - let BindingType::Buffer { - min_binding_size, .. - } = bind_group_layout_entry.ty - else { - continue; - }; - let mut size: u64 = match min_binding_size { - None => 0, - Some(min_binding_size) => min_binding_size.into(), - }; - size = size.max(MIN_BUFFER_SIZE); +impl MaterialBindGroupNonBindlessAllocator +where + M: Material, +{ + /// Creates a new [`MaterialBindGroupNonBindlessAllocator`] managing the + /// bind groups for a single non-bindless material. + fn new() -> MaterialBindGroupNonBindlessAllocator { + MaterialBindGroupNonBindlessAllocator { + bind_groups: vec![], + to_prepare: HashSet::default(), + free_indices: vec![], + phantom: PhantomData, + } + } - fallback_buffers.insert( - bind_group_layout_entry.binding, - render_device.create_buffer_with_data(&BufferInitDescriptor { - label: Some("fallback buffer"), - contents: &vec![0; size as usize], - usage: BufferUsages::UNIFORM | BufferUsages::STORAGE, - }), - ); + /// Inserts a bind group, either unprepared or prepared, into this allocator + /// and returns a [`MaterialBindingId`]. + /// + /// The returned [`MaterialBindingId`] can later be used to fetch the bind + /// group. + fn allocate( + &mut self, + bind_group: MaterialNonBindlessAllocatedBindGroup, + ) -> MaterialBindingId { + let group_id = self + .free_indices + .pop() + .unwrap_or(MaterialBindGroupIndex(self.bind_groups.len() as u32)); + if self.bind_groups.len() < *group_id as usize + 1 { + self.bind_groups + .resize_with(*group_id as usize + 1, || None); } - MaterialFallbackBuffers(fallback_buffers) + if matches!( + bind_group, + MaterialNonBindlessAllocatedBindGroup::Unprepared { .. } + ) { + self.to_prepare.insert(group_id); + } + + self.bind_groups[*group_id as usize] = Some(bind_group); + + MaterialBindingId { + group: group_id, + slot: default(), + } + } + + /// Inserts an unprepared bind group into this allocator and returns a + /// [`MaterialBindingId`]. + fn allocate_unprepared( + &mut self, + unprepared_bind_group: UnpreparedBindGroup, + bind_group_layout: BindGroupLayout, + ) -> MaterialBindingId { + self.allocate(MaterialNonBindlessAllocatedBindGroup::Unprepared { + bind_group: unprepared_bind_group, + layout: bind_group_layout, + }) + } + + /// Inserts an prepared bind group into this allocator and returns a + /// [`MaterialBindingId`]. + fn allocate_prepared( + &mut self, + prepared_bind_group: PreparedBindGroup, + ) -> MaterialBindingId { + self.allocate(MaterialNonBindlessAllocatedBindGroup::Prepared { + bind_group: prepared_bind_group, + uniform_buffers: vec![], + }) + } + + /// Deallocates the bind group with the given binding ID. + fn free(&mut self, binding_id: MaterialBindingId) { + debug_assert_eq!(binding_id.slot, MaterialBindGroupSlot(0)); + debug_assert!(self.bind_groups[*binding_id.group as usize].is_some()); + self.bind_groups[*binding_id.group as usize] = None; + self.to_prepare.remove(&binding_id.group); + self.free_indices.push(binding_id.group); + } + + /// Returns a wrapper around the bind group with the given index. 
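An illustrative sketch of the non-bindless allocator's behavior above and the deferred preparation step that follows. It calls crate-private methods directly and assumes tuple-field access and comparison impls that may not be exposed, so treat it as a sketch of the semantics rather than a literal test; in practice these calls go through the `MaterialBindGroupAllocator` wrapper.

    fn example_non_bindless<M: Material>(
        allocator: &mut MaterialBindGroupNonBindlessAllocator<M>,
        material_a: UnpreparedBindGroup<M::Data>,
        material_b: UnpreparedBindGroup<M::Data>,
        layout: &BindGroupLayout,
        render_device: &RenderDevice,
    ) {
        // Every non-bindless material gets its own bind group: the group index advances
        // per allocation while the slot within the "slab" is always 0.
        let id_a = allocator.allocate_unprepared(material_a, layout.clone());
        let id_b = allocator.allocate_unprepared(material_b, layout.clone());
        assert_eq!(id_a.slot.0, 0);
        assert_eq!(id_b.slot.0, 0);
        assert_ne!(id_a.group, id_b.group);

        // Nothing is created on the GPU yet; the bind groups are built by the preparation
        // step below, which also packs any `OwnedBindingResource::Data` bindings into
        // freshly created uniform buffers.
        allocator.prepare_bind_groups(render_device);
    }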
+ fn get(&self, group: MaterialBindGroupIndex) -> Option> { + self.bind_groups[group.0 as usize] + .as_ref() + .map(|bind_group| match bind_group { + MaterialNonBindlessAllocatedBindGroup::Prepared { bind_group, .. } => { + MaterialNonBindlessSlab::Prepared(bind_group) + } + MaterialNonBindlessAllocatedBindGroup::Unprepared { bind_group, .. } => { + MaterialNonBindlessSlab::Unprepared(bind_group) + } + }) + } + + /// Prepares any as-yet unprepared bind groups that this allocator is + /// managing. + /// + /// Unprepared bind groups can be added to this allocator with + /// [`Self::allocate_unprepared`]. Such bind groups will defer being + /// prepared until the next time this method is called. + fn prepare_bind_groups(&mut self, render_device: &RenderDevice) { + for bind_group_index in mem::take(&mut self.to_prepare) { + let Some(MaterialNonBindlessAllocatedBindGroup::Unprepared { + bind_group: unprepared_bind_group, + layout: bind_group_layout, + }) = mem::take(&mut self.bind_groups[*bind_group_index as usize]) + else { + panic!("Allocation didn't exist or was already prepared"); + }; + + // Pack any `Data` into uniform buffers. + let mut uniform_buffers = vec![]; + for (index, binding) in unprepared_bind_group.bindings.iter() { + let OwnedBindingResource::Data(ref owned_data) = *binding else { + continue; + }; + let label = format!("material uniform data {}", *index); + let uniform_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor { + label: Some(&label), + contents: &owned_data.0, + usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, + }); + uniform_buffers.push(uniform_buffer); + } + + // Create bind group entries. + let mut bind_group_entries = vec![]; + let mut uniform_buffers_iter = uniform_buffers.iter(); + for (index, binding) in unprepared_bind_group.bindings.iter() { + match *binding { + OwnedBindingResource::Data(_) => { + bind_group_entries.push(BindGroupEntry { + binding: *index, + resource: uniform_buffers_iter + .next() + .expect("We should have created uniform buffers for each `Data`") + .as_entire_binding(), + }); + } + _ => bind_group_entries.push(BindGroupEntry { + binding: *index, + resource: binding.get_binding(), + }), + } + } + + // Create the bind group. + let bind_group = render_device.create_bind_group( + M::label(), + &bind_group_layout, + &bind_group_entries, + ); + + self.bind_groups[*bind_group_index as usize] = + Some(MaterialNonBindlessAllocatedBindGroup::Prepared { + bind_group: PreparedBindGroup { + bindings: unprepared_bind_group.bindings, + bind_group, + data: unprepared_bind_group.data, + }, + uniform_buffers, + }); + } + } +} + +impl<'a, M> MaterialSlab<'a, M> +where + M: Material, +{ + /// Returns the extra data associated with this material. + /// + /// When deriving `AsBindGroup`, this data is given by the + /// `#[bind_group_data(DataType)]` attribute on the material structure. + pub fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { + match self.0 { + MaterialSlabImpl::Bindless(material_bindless_slab) => { + material_bindless_slab.get_extra_data(slot) + } + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Prepared( + prepared_bind_group, + )) => &prepared_bind_group.data, + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Unprepared( + unprepared_bind_group, + )) => &unprepared_bind_group.data, + } + } + + /// Returns the [`BindGroup`] corresponding to this slab, if it's been + /// prepared. 
+ /// + /// You can prepare bind groups by calling + /// [`MaterialBindGroupAllocator::prepare_bind_groups`]. If the bind group + /// isn't ready, this method returns `None`. + pub fn bind_group(&self) -> Option<&'a BindGroup> { + match self.0 { + MaterialSlabImpl::Bindless(material_bindless_slab) => { + material_bindless_slab.bind_group() + } + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Prepared( + prepared_bind_group, + )) => Some(&prepared_bind_group.bind_group), + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Unprepared(_)) => None, + } + } +} + +impl MaterialDataBuffer { + /// Creates a new [`MaterialDataBuffer`] managing a buffer of elements of + /// size `aligned_element_size` that will be bound to the given binding + /// number. + fn new(binding_number: BindingNumber, aligned_element_size: u32) -> MaterialDataBuffer { + MaterialDataBuffer { + binding_number, + buffer: RetainedRawBufferVec::new(BufferUsages::STORAGE), + aligned_element_size, + free_slots: vec![], + len: 0, + } + } + + /// Allocates a slot for a new piece of data, copies the data into that + /// slot, and returns the slot ID. + /// + /// The size of the piece of data supplied to this method must equal the + /// [`Self::aligned_element_size`] provided to [`MaterialDataBuffer::new`]. + fn insert(&mut self, data: &[u8]) -> u32 { + // Make sure the data is of the right length. + debug_assert_eq!(data.len(), self.aligned_element_size as usize); + + // Grab a slot. + let slot = self.free_slots.pop().unwrap_or(self.len); + + // Calculate the range we're going to copy to. + let start = slot as usize * self.aligned_element_size as usize; + let end = (slot as usize + 1) * self.aligned_element_size as usize; + + // Resize the buffer if necessary. + if self.buffer.len() < end { + self.buffer.reserve_internal(end); + } + while self.buffer.values().len() < end { + self.buffer.push(0); + } + + // Copy in the data. + self.buffer.values_mut()[start..end].copy_from_slice(data); + + // Mark the buffer dirty, and finish up. + self.len += 1; + self.buffer.dirty = BufferDirtyState::NeedsReserve; + slot + } + + /// Marks the given slot as free.
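// A CPU-only sketch using std types (not the engine's RetainedRawBufferVec) of
// the slot arithmetic `insert` relies on above: every element occupies exactly
// one stride, so slot N covers bytes [N * stride, (N + 1) * stride), and
// `remove` simply recycles the slot number for a later insertion.
struct StrideBuffer {
    bytes: Vec<u8>,
    stride: usize,
    free_slots: Vec<u32>,
    len: u32,
}

impl StrideBuffer {
    fn insert(&mut self, data: &[u8]) -> u32 {
        assert_eq!(data.len(), self.stride, "elements must be exactly one stride long");
        let slot = self.free_slots.pop().unwrap_or(self.len);
        let start = slot as usize * self.stride;
        let end = start + self.stride;
        // Grow with zeroes so a brand-new slot can be indexed safely.
        if self.bytes.len() < end {
            self.bytes.resize(end, 0);
        }
        self.bytes[start..end].copy_from_slice(data);
        self.len += 1;
        slot
    }

    fn remove(&mut self, slot: u32) {
        // The bytes stay in place; the slot is handed out again later.
        self.free_slots.push(slot);
        self.len -= 1;
    }
}

fn stride_buffer_demo() {
    let mut buffer = StrideBuffer { bytes: Vec::new(), stride: 4, free_slots: Vec::new(), len: 0 };
    let a = buffer.insert(&1u32.to_le_bytes());
    let b = buffer.insert(&2u32.to_le_bytes());
    buffer.remove(a);
    // The freed slot is reused, so the backing storage does not grow.
    assert_eq!(buffer.insert(&3u32.to_le_bytes()), a);
    assert_eq!(buffer.bytes.len(), 8);
    assert_eq!(b, 1);
}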
+ fn remove(&mut self, slot: u32) { + self.free_slots.push(slot); + self.len -= 1; } } diff --git a/crates/bevy_pbr/src/mesh_material.rs b/crates/bevy_pbr/src/mesh_material.rs index c3f81943ec..027f2073ec 100644 --- a/crates/bevy_pbr/src/mesh_material.rs +++ b/crates/bevy_pbr/src/mesh_material.rs @@ -36,8 +36,8 @@ use derive_more::derive::From; /// )); /// } /// ``` -#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, From)] +#[reflect(Component, Default, Clone, PartialEq)] pub struct MeshMaterial3d(pub Handle); impl Default for MeshMaterial3d { @@ -46,6 +46,14 @@ impl Default for MeshMaterial3d { } } +impl PartialEq for MeshMaterial3d { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Eq for MeshMaterial3d {} + impl From> for AssetId { fn from(material: MeshMaterial3d) -> Self { material.id() diff --git a/crates/bevy_pbr/src/meshlet/clear_visibility_buffer.wgsl b/crates/bevy_pbr/src/meshlet/clear_visibility_buffer.wgsl new file mode 100644 index 0000000000..5956921ca1 --- /dev/null +++ b/crates/bevy_pbr/src/meshlet/clear_visibility_buffer.wgsl @@ -0,0 +1,18 @@ +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; +#else +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; +#endif +var view_size: vec2; + +@compute +@workgroup_size(16, 16, 1) +fn clear_visibility_buffer(@builtin(global_invocation_id) global_id: vec3) { + if any(global_id.xy >= view_size) { return; } + +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT + textureStore(meshlet_visibility_buffer, global_id.xy, vec4(0lu)); +#else + textureStore(meshlet_visibility_buffer, global_id.xy, vec4(0u)); +#endif +} diff --git a/crates/bevy_pbr/src/meshlet/from_mesh.rs b/crates/bevy_pbr/src/meshlet/from_mesh.rs index 7b959eef46..ed2be52f53 100644 --- a/crates/bevy_pbr/src/meshlet/from_mesh.rs +++ b/crates/bevy_pbr/src/meshlet/from_mesh.rs @@ -3,7 +3,7 @@ use super::asset::{ }; use alloc::borrow::Cow; use bevy_math::{ops::log2, IVec3, Vec2, Vec3, Vec3Swizzles}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_render::{ mesh::{Indices, Mesh}, render_resource::PrimitiveTopology, @@ -102,11 +102,13 @@ impl MeshletMesh { }, }) .collect::>(); - let mut simplification_errors = iter::repeat(MeshletSimplificationError { - group_error: f16::ZERO, - parent_group_error: f16::MAX, - }) - .take(meshlets.len()) + let mut simplification_errors = iter::repeat_n( + MeshletSimplificationError { + group_error: f16::ZERO, + parent_group_error: f16::MAX, + }, + meshlets.len(), + ) .collect::>(); let mut vertex_locks = vec![false; vertices.vertex_count]; @@ -187,13 +189,13 @@ impl MeshletMesh { }, } })); - simplification_errors.extend( - iter::repeat(MeshletSimplificationError { + simplification_errors.extend(iter::repeat_n( + MeshletSimplificationError { group_error, parent_group_error: f16::MAX, - }) - .take(new_meshlet_ids.len()), - ); + }, + new_meshlet_ids.len(), + )); } // Set simplification queue to the list of newly created meshlets diff --git a/crates/bevy_pbr/src/meshlet/instance_manager.rs b/crates/bevy_pbr/src/meshlet/instance_manager.rs index 26f6432a1f..661d4791ae 100644 --- a/crates/bevy_pbr/src/meshlet/instance_manager.rs +++ b/crates/bevy_pbr/src/meshlet/instance_manager.rs @@ -1,18 +1,18 @@ use super::{meshlet_mesh_manager::MeshletMeshManager, 
MeshletMesh, MeshletMesh3d}; use crate::{ - Material, MeshFlags, MeshTransforms, MeshUniform, NotShadowCaster, NotShadowReceiver, - PreviousGlobalTransform, RenderMaterialBindings, RenderMaterialInstances, - RenderMeshMaterialIds, + material::DUMMY_MESH_MATERIAL, Material, MaterialBindingId, MeshFlags, MeshTransforms, + MeshUniform, NotShadowCaster, NotShadowReceiver, PreviousGlobalTransform, + RenderMaterialBindings, RenderMaterialInstances, }; use bevy_asset::{AssetEvent, AssetServer, Assets, UntypedAssetId}; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, Entities, Entity}, + entity::{Entities, Entity, EntityHashMap}, event::EventReader, query::Has, resource::Resource, system::{Local, Query, Res, ResMut, SystemState}, }; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ render_resource::StorageBuffer, sync_world::MainEntity, view::RenderLayers, MainWorld, }; @@ -90,7 +90,7 @@ impl InstanceManager { transform: &GlobalTransform, previous_transform: Option<&PreviousGlobalTransform>, render_layers: Option<&RenderLayers>, - mesh_material_ids: &RenderMeshMaterialIds, + mesh_material_ids: &RenderMaterialInstances, render_material_bindings: &RenderMaterialBindings, not_shadow_receiver: bool, not_shadow_caster: bool, @@ -113,10 +113,15 @@ impl InstanceManager { }; let mesh_material = mesh_material_ids.mesh_material(instance); - let mesh_material_binding_id = render_material_bindings - .get(&mesh_material) - .cloned() - .unwrap_or_default(); + let mesh_material_binding_id = if mesh_material != DUMMY_MESH_MATERIAL.untyped() { + render_material_bindings + .get(&mesh_material) + .cloned() + .unwrap_or_default() + } else { + // Use a dummy binding ID if the mesh has no material + MaterialBindingId::default() + }; let mesh_uniform = MeshUniform::new( &transforms, @@ -187,7 +192,7 @@ pub fn extract_meshlet_mesh_entities( mut instance_manager: ResMut, // TODO: Replace main_world and system_state when Extract>> is possible mut main_world: ResMut, - mesh_material_ids: Res, + mesh_material_ids: Res, render_material_bindings: Res, mut system_state: Local< Option< @@ -269,20 +274,22 @@ pub fn extract_meshlet_mesh_entities( /// and note that the material is used by at least one entity in the scene. 
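// Hypothetical, simplified types (not the engine's): the lookup pattern used
// above when resolving a mesh's material binding. A mesh without a real
// material carries the shared dummy material id, and both that case and a
// missing table entry fall back to the default binding.
use std::collections::HashMap;

type MaterialId = u64;
type BindingId = u32;

const DUMMY_MATERIAL: MaterialId = 0;

fn resolve_binding(material: MaterialId, bindings: &HashMap<MaterialId, BindingId>) -> BindingId {
    if material != DUMMY_MATERIAL {
        bindings.get(&material).copied().unwrap_or_default()
    } else {
        // Meshes without a material never consult the binding table.
        BindingId::default()
    }
}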
pub fn queue_material_meshlet_meshes( mut instance_manager: ResMut, - render_material_instances: Res>, + render_material_instances: Res, ) { let instance_manager = instance_manager.deref_mut(); for (i, (instance, _, _)) in instance_manager.instances.iter().enumerate() { - if let Some(material_asset_id) = render_material_instances.get(instance) { - if let Some(material_id) = instance_manager - .material_id_lookup - .get(&material_asset_id.untyped()) - { - instance_manager - .material_ids_present_in_scene - .insert(*material_id); - instance_manager.instance_material_ids.get_mut()[i] = *material_id; + if let Some(material_instance) = render_material_instances.instances.get(instance) { + if let Ok(material_asset_id) = material_instance.asset_id.try_typed::() { + if let Some(material_id) = instance_manager + .material_id_lookup + .get(&material_asset_id.untyped()) + { + instance_manager + .material_ids_present_in_scene + .insert(*material_id); + instance_manager.instance_material_ids.get_mut()[i] = *material_id; + } } } } diff --git a/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs b/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs index 0e71d5a9d9..57762bfc8a 100644 --- a/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs +++ b/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs @@ -13,7 +13,7 @@ use bevy_core_pipeline::{ tonemapping::{DebandDither, Tonemapping}, }; use bevy_derive::{Deref, DerefMut}; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ camera::TemporalJitter, mesh::{Mesh, MeshVertexBufferLayout, MeshVertexBufferLayoutRef, MeshVertexBufferLayouts}, @@ -37,7 +37,7 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( material_pipeline: Res>, mesh_pipeline: Res, render_materials: Res>>, - render_material_instances: Res>, + render_material_instances: Res, material_bind_group_allocator: Res>, asset_server: Res, mut mesh_vertex_buffer_layouts: ResMut, @@ -148,8 +148,13 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( view_key |= MeshPipelineKey::from_primitive_topology(PrimitiveTopology::TriangleList); - for material_id in render_material_instances.values().collect::>() { - let Some(material) = render_materials.get(*material_id) else { + for material_id in render_material_instances + .instances + .values() + .flat_map(|instance| instance.asset_id.try_typed::().ok()) + .collect::>() + { + let Some(material) = render_materials.get(material_id) else { continue; }; let Some(material_bind_group) = @@ -228,7 +233,7 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( else { continue; }; - let Some(bind_group) = material_bind_group.get_bind_group() else { + let Some(bind_group) = material_bind_group.bind_group() else { continue; }; @@ -256,7 +261,7 @@ pub fn prepare_material_meshlet_meshes_prepass( pipeline_cache: Res, prepass_pipeline: Res>, render_materials: Res>>, - render_material_instances: Res>, + render_material_instances: Res, mut mesh_vertex_buffer_layouts: ResMut, material_bind_group_allocator: Res>, asset_server: Res, @@ -293,8 +298,13 @@ pub fn prepare_material_meshlet_meshes_prepass( view_key |= MeshPipelineKey::from_primitive_topology(PrimitiveTopology::TriangleList); - for material_id in render_material_instances.values().collect::>() { - let Some(material) = render_materials.get(*material_id) else { + for material_id in render_material_instances + .instances + .values() + .flat_map(|instance| instance.asset_id.try_typed::().ok()) + 
.collect::>() + { + let Some(material) = render_materials.get(material_id) else { continue; }; let Some(material_bind_group) = @@ -336,9 +346,12 @@ pub fn prepare_material_meshlet_meshes_prepass( shader_defs.push("MESHLET_MESH_MATERIAL_PASS".into()); let view_layout = if view_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { - prepass_pipeline.view_layout_motion_vectors.clone() + prepass_pipeline.internal.view_layout_motion_vectors.clone() } else { - prepass_pipeline.view_layout_no_motion_vectors.clone() + prepass_pipeline + .internal + .view_layout_no_motion_vectors + .clone() }; let fragment_shader = if view_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { @@ -357,7 +370,7 @@ pub fn prepare_material_meshlet_meshes_prepass( layout: vec![ view_layout, resource_manager.material_shade_bind_group_layout.clone(), - prepass_pipeline.material_layout.clone(), + prepass_pipeline.internal.material_layout.clone(), ], push_constant_ranges: vec![], vertex: VertexState { @@ -399,7 +412,7 @@ pub fn prepare_material_meshlet_meshes_prepass( else { continue; }; - let Some(bind_group) = material_bind_group.get_bind_group() else { + let Some(bind_group) = material_bind_group.bind_group() else { continue; }; diff --git a/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl b/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl index 7af63d0e0f..e179e78b7a 100644 --- a/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl +++ b/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl @@ -100,9 +100,9 @@ fn cluster_is_second_pass_candidate(cluster_id: u32) -> bool { @group(0) @binding(6) var meshlet_raster_clusters: array; // Single object shared between all workgroups @group(0) @binding(7) var meshlet_software_raster_cluster_count: u32; #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT -@group(0) @binding(8) var meshlet_visibility_buffer: array>; // Per pixel +@group(0) @binding(8) var meshlet_visibility_buffer: texture_storage_2d; #else -@group(0) @binding(8) var meshlet_visibility_buffer: array>; // Per pixel +@group(0) @binding(8) var meshlet_visibility_buffer: texture_storage_2d; #endif @group(0) @binding(9) var view: View; @@ -149,7 +149,7 @@ fn get_meshlet_vertex_position(meshlet: ptr, vertex_id: u32) #endif #ifdef MESHLET_MESH_MATERIAL_PASS -@group(1) @binding(0) var meshlet_visibility_buffer: array; // Per pixel +@group(1) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; @group(1) @binding(1) var meshlet_cluster_meshlet_ids: array; // Per cluster @group(1) @binding(2) var meshlets: array; // Per meshlet @group(1) @binding(3) var meshlet_indices: array; // Many per meshlet diff --git a/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs b/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs index 2bc686fe43..0f4aab7509 100644 --- a/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs +++ b/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs @@ -11,7 +11,7 @@ use bevy_ecs::{ world::{FromWorld, World}, }; use bevy_math::Vec2; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_render::{ render_resource::BufferAddress, renderer::{RenderDevice, RenderQueue}, diff --git a/crates/bevy_pbr/src/meshlet/mod.rs b/crates/bevy_pbr/src/meshlet/mod.rs index dc53d5f9e7..2e483b210c 100644 --- a/crates/bevy_pbr/src/meshlet/mod.rs +++ b/crates/bevy_pbr/src/meshlet/mod.rs @@ -65,11 +65,11 @@ use bevy_core_pipeline::{ }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - component::{require, Component}, + component::Component, entity::Entity, query::Has, 
reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Commands, Query}, }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -106,9 +106,9 @@ const MESHLET_MESH_MATERIAL_SHADER_HANDLE: Handle = /// * Requires preprocessing meshes. See [`MeshletMesh`] for details. /// * Limitations on the kinds of materials you can use. See [`MeshletMesh`] for details. /// -/// This plugin requires a fairly recent GPU that supports [`WgpuFeatures::SHADER_INT64_ATOMIC_MIN_MAX`]. +/// This plugin requires a fairly recent GPU that supports [`WgpuFeatures::TEXTURE_INT64_ATOMIC`]. /// -/// This plugin currently works only on the Vulkan backend. +/// This plugin currently works only on the Vulkan and Metal backends. /// /// This plugin is not compatible with [`Msaa`]. Any camera rendering a [`MeshletMesh`] must have /// [`Msaa`] set to [`Msaa::Off`]. @@ -133,7 +133,8 @@ pub struct MeshletPlugin { impl MeshletPlugin { /// [`WgpuFeatures`] required for this plugin to function. pub fn required_wgpu_features() -> WgpuFeatures { - WgpuFeatures::SHADER_INT64_ATOMIC_MIN_MAX + WgpuFeatures::TEXTURE_INT64_ATOMIC + | WgpuFeatures::TEXTURE_ATOMIC | WgpuFeatures::SHADER_INT64 | WgpuFeatures::SUBGROUP | WgpuFeatures::DEPTH_CLIP_CONTROL @@ -151,6 +152,12 @@ impl Plugin for MeshletPlugin { std::process::exit(1); } + load_internal_asset!( + app, + MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE, + "clear_visibility_buffer.wgsl", + Shader::from_wgsl + ); load_internal_asset!( app, MESHLET_BINDINGS_SHADER_HANDLE, @@ -246,13 +253,11 @@ impl Plugin for MeshletPlugin { Core3d, ( NodeMeshlet::VisibilityBufferRasterPass, - NodePbr::ShadowPass, + NodePbr::EarlyShadowPass, // NodeMeshlet::Prepass, // NodeMeshlet::DeferredPrepass, - Node3d::DeferredPrepass, - Node3d::CopyDeferredLightingId, Node3d::EndPrepasses, // Node3d::StartMainPass, @@ -285,7 +290,7 @@ impl Plugin for MeshletPlugin { /// The meshlet mesh equivalent of [`bevy_render::mesh::Mesh3d`]. 
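// A minimal sketch of gating meshlet support on the feature set listed above.
// `MeshletPlugin::required_wgpu_features()` is the helper shown in this diff;
// the import paths and the idea of probing the adapter up front are
// assumptions, not engine-mandated setup (the plugin itself calls
// `std::process::exit(1)` during `build` when the features are missing).
use bevy_pbr::meshlet::MeshletPlugin;
use bevy_render::settings::WgpuFeatures;

fn meshlet_supported(available_features: WgpuFeatures) -> bool {
    // TEXTURE_INT64_ATOMIC, TEXTURE_ATOMIC, SHADER_INT64, SUBGROUP and
    // DEPTH_CLIP_CONTROL must all be present, which currently limits the
    // meshlet renderer to recent GPUs on the Vulkan and Metal backends.
    available_features.contains(MeshletPlugin::required_wgpu_features())
}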
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone, PartialEq)] #[require(Transform, PreviousGlobalTransform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct MeshletMesh3d(pub Handle); diff --git a/crates/bevy_pbr/src/meshlet/pipelines.rs b/crates/bevy_pbr/src/meshlet/pipelines.rs index 0a5bffa35a..c25d896b8a 100644 --- a/crates/bevy_pbr/src/meshlet/pipelines.rs +++ b/crates/bevy_pbr/src/meshlet/pipelines.rs @@ -10,6 +10,8 @@ use bevy_ecs::{ }; use bevy_render::render_resource::*; +pub const MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE: Handle = + weak_handle!("a4bf48e4-5605-4d1c-987e-29c7b1ec95dc"); pub const MESHLET_FILL_CLUSTER_BUFFERS_SHADER_HANDLE: Handle = weak_handle!("80ccea4a-8234-4ee0-af74-77b3cad503cf"); pub const MESHLET_CULLING_SHADER_HANDLE: Handle = @@ -26,6 +28,8 @@ pub const MESHLET_REMAP_1D_TO_2D_DISPATCH_SHADER_HANDLE: Handle = #[derive(Resource)] pub struct MeshletPipelines { fill_cluster_buffers: CachedComputePipelineId, + clear_visibility_buffer: CachedComputePipelineId, + clear_visibility_buffer_shadow_view: CachedComputePipelineId, cull_first: CachedComputePipelineId, cull_second: CachedComputePipelineId, downsample_depth_first: CachedComputePipelineId, @@ -33,10 +37,10 @@ pub struct MeshletPipelines { downsample_depth_first_shadow_view: CachedComputePipelineId, downsample_depth_second_shadow_view: CachedComputePipelineId, visibility_buffer_software_raster: CachedComputePipelineId, - visibility_buffer_software_raster_depth_only: CachedComputePipelineId, + visibility_buffer_software_raster_shadow_view: CachedComputePipelineId, visibility_buffer_hardware_raster: CachedRenderPipelineId, - visibility_buffer_hardware_raster_depth_only: CachedRenderPipelineId, - visibility_buffer_hardware_raster_depth_only_unclipped: CachedRenderPipelineId, + visibility_buffer_hardware_raster_shadow_view: CachedRenderPipelineId, + visibility_buffer_hardware_raster_shadow_view_unclipped: CachedRenderPipelineId, resolve_depth: CachedRenderPipelineId, resolve_depth_shadow_view: CachedRenderPipelineId, resolve_material_depth: CachedRenderPipelineId, @@ -49,12 +53,27 @@ impl FromWorld for MeshletPipelines { let fill_cluster_buffers_bind_group_layout = resource_manager .fill_cluster_buffers_bind_group_layout .clone(); + let clear_visibility_buffer_bind_group_layout = resource_manager + .clear_visibility_buffer_bind_group_layout + .clone(); + let clear_visibility_buffer_shadow_view_bind_group_layout = resource_manager + .clear_visibility_buffer_shadow_view_bind_group_layout + .clone(); let cull_layout = resource_manager.culling_bind_group_layout.clone(); let downsample_depth_layout = resource_manager.downsample_depth_bind_group_layout.clone(); + let downsample_depth_shadow_view_layout = resource_manager + .downsample_depth_shadow_view_bind_group_layout + .clone(); let visibility_buffer_raster_layout = resource_manager .visibility_buffer_raster_bind_group_layout .clone(); + let visibility_buffer_raster_shadow_view_layout = resource_manager + .visibility_buffer_raster_shadow_view_bind_group_layout + .clone(); let resolve_depth_layout = resource_manager.resolve_depth_bind_group_layout.clone(); + let resolve_depth_shadow_view_layout = resource_manager + .resolve_depth_shadow_view_bind_group_layout + .clone(); let resolve_material_depth_layout = resource_manager .resolve_material_depth_bind_group_layout .clone(); @@ -67,7 +86,7 @@ impl FromWorld 
for MeshletPipelines { fill_cluster_buffers: pipeline_cache.queue_compute_pipeline( ComputePipelineDescriptor { label: Some("meshlet_fill_cluster_buffers_pipeline".into()), - layout: vec![fill_cluster_buffers_bind_group_layout.clone()], + layout: vec![fill_cluster_buffers_bind_group_layout], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, range: 0..4, @@ -79,6 +98,36 @@ impl FromWorld for MeshletPipelines { }, ), + clear_visibility_buffer: pipeline_cache.queue_compute_pipeline( + ComputePipelineDescriptor { + label: Some("meshlet_clear_visibility_buffer_pipeline".into()), + layout: vec![clear_visibility_buffer_bind_group_layout], + push_constant_ranges: vec![PushConstantRange { + stages: ShaderStages::COMPUTE, + range: 0..8, + }], + shader: MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE, + shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into()], + entry_point: "clear_visibility_buffer".into(), + zero_initialize_workgroup_memory: false, + }, + ), + + clear_visibility_buffer_shadow_view: pipeline_cache.queue_compute_pipeline( + ComputePipelineDescriptor { + label: Some("meshlet_clear_visibility_buffer_shadow_view_pipeline".into()), + layout: vec![clear_visibility_buffer_shadow_view_bind_group_layout], + push_constant_ranges: vec![PushConstantRange { + stages: ShaderStages::COMPUTE, + range: 0..8, + }], + shader: MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE, + shader_defs: vec![], + entry_point: "clear_visibility_buffer".into(), + zero_initialize_workgroup_memory: false, + }, + ), + cull_first: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor { label: Some("meshlet_culling_first_pipeline".into()), layout: vec![cull_layout.clone()], @@ -117,7 +166,7 @@ impl FromWorld for MeshletPipelines { layout: vec![downsample_depth_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, shader_defs: vec![ @@ -135,7 +184,7 @@ impl FromWorld for MeshletPipelines { layout: vec![downsample_depth_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, shader_defs: vec![ @@ -150,10 +199,10 @@ impl FromWorld for MeshletPipelines { downsample_depth_first_shadow_view: pipeline_cache.queue_compute_pipeline( ComputePipelineDescriptor { label: Some("meshlet_downsample_depth_first_pipeline".into()), - layout: vec![downsample_depth_layout.clone()], + layout: vec![downsample_depth_shadow_view_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, shader_defs: vec!["MESHLET".into()], @@ -165,10 +214,10 @@ impl FromWorld for MeshletPipelines { downsample_depth_second_shadow_view: pipeline_cache.queue_compute_pipeline( ComputePipelineDescriptor { label: Some("meshlet_downsample_depth_second_pipeline".into()), - layout: vec![downsample_depth_layout], + layout: vec![downsample_depth_shadow_view_layout], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, shader_defs: vec!["MESHLET".into()], @@ -198,12 +247,12 @@ impl FromWorld for MeshletPipelines { }, ), - visibility_buffer_software_raster_depth_only: pipeline_cache.queue_compute_pipeline( + visibility_buffer_software_raster_shadow_view: pipeline_cache.queue_compute_pipeline( 
ComputePipelineDescriptor { label: Some( - "meshlet_visibility_buffer_software_raster_depth_only_pipeline".into(), + "meshlet_visibility_buffer_software_raster_shadow_view_pipeline".into(), ), - layout: vec![visibility_buffer_raster_layout.clone()], + layout: vec![visibility_buffer_raster_shadow_view_layout.clone()], push_constant_ranges: vec![], shader: MESHLET_VISIBILITY_BUFFER_SOFTWARE_RASTER_SHADER_HANDLE, shader_defs: vec![ @@ -265,12 +314,12 @@ impl FromWorld for MeshletPipelines { }, ), - visibility_buffer_hardware_raster_depth_only: pipeline_cache.queue_render_pipeline( + visibility_buffer_hardware_raster_shadow_view: pipeline_cache.queue_render_pipeline( RenderPipelineDescriptor { label: Some( - "meshlet_visibility_buffer_hardware_raster_depth_only_pipeline".into(), + "meshlet_visibility_buffer_hardware_raster_shadow_view_pipeline".into(), ), - layout: vec![visibility_buffer_raster_layout.clone()], + layout: vec![visibility_buffer_raster_shadow_view_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::VERTEX, range: 0..4, @@ -306,13 +355,13 @@ impl FromWorld for MeshletPipelines { }, ), - visibility_buffer_hardware_raster_depth_only_unclipped: pipeline_cache + visibility_buffer_hardware_raster_shadow_view_unclipped: pipeline_cache .queue_render_pipeline(RenderPipelineDescriptor { label: Some( - "meshlet_visibility_buffer_hardware_raster_depth_only_unclipped_pipeline" + "meshlet_visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline" .into(), ), - layout: vec![visibility_buffer_raster_layout], + layout: vec![visibility_buffer_raster_shadow_view_layout], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::VERTEX, range: 0..4, @@ -349,17 +398,14 @@ impl FromWorld for MeshletPipelines { resolve_depth: pipeline_cache.queue_render_pipeline(RenderPipelineDescriptor { label: Some("meshlet_resolve_depth_pipeline".into()), - layout: vec![resolve_depth_layout.clone()], - push_constant_ranges: vec![PushConstantRange { - stages: ShaderStages::FRAGMENT, - range: 0..4, - }], + layout: vec![resolve_depth_layout], + push_constant_ranges: vec![], vertex: fullscreen_shader_vertex_state(), primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { format: CORE_3D_DEPTH_FORMAT, depth_write_enabled: true, - depth_compare: CompareFunction::GreaterEqual, + depth_compare: CompareFunction::Always, stencil: StencilState::default(), bias: DepthBiasState::default(), }), @@ -376,17 +422,14 @@ impl FromWorld for MeshletPipelines { resolve_depth_shadow_view: pipeline_cache.queue_render_pipeline( RenderPipelineDescriptor { label: Some("meshlet_resolve_depth_pipeline".into()), - layout: vec![resolve_depth_layout], - push_constant_ranges: vec![PushConstantRange { - stages: ShaderStages::FRAGMENT, - range: 0..4, - }], + layout: vec![resolve_depth_shadow_view_layout], + push_constant_ranges: vec![], vertex: fullscreen_shader_vertex_state(), primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { format: CORE_3D_DEPTH_FORMAT, depth_write_enabled: true, - depth_compare: CompareFunction::GreaterEqual, + depth_compare: CompareFunction::Always, stencil: StencilState::default(), bias: DepthBiasState::default(), }), @@ -405,10 +448,7 @@ impl FromWorld for MeshletPipelines { RenderPipelineDescriptor { label: Some("meshlet_resolve_material_depth_pipeline".into()), layout: vec![resolve_material_depth_layout], - push_constant_ranges: vec![PushConstantRange { - stages: ShaderStages::FRAGMENT, - range: 0..4, - }], + 
push_constant_ranges: vec![], vertex: fullscreen_shader_vertex_state(), primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { @@ -460,6 +500,8 @@ impl MeshletPipelines { &ComputePipeline, &ComputePipeline, &ComputePipeline, + &ComputePipeline, + &ComputePipeline, &RenderPipeline, &RenderPipeline, &RenderPipeline, @@ -472,6 +514,8 @@ impl MeshletPipelines { let pipeline = world.get_resource::()?; Some(( pipeline_cache.get_compute_pipeline(pipeline.fill_cluster_buffers)?, + pipeline_cache.get_compute_pipeline(pipeline.clear_visibility_buffer)?, + pipeline_cache.get_compute_pipeline(pipeline.clear_visibility_buffer_shadow_view)?, pipeline_cache.get_compute_pipeline(pipeline.cull_first)?, pipeline_cache.get_compute_pipeline(pipeline.cull_second)?, pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_first)?, @@ -480,12 +524,12 @@ impl MeshletPipelines { pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_second_shadow_view)?, pipeline_cache.get_compute_pipeline(pipeline.visibility_buffer_software_raster)?, pipeline_cache - .get_compute_pipeline(pipeline.visibility_buffer_software_raster_depth_only)?, + .get_compute_pipeline(pipeline.visibility_buffer_software_raster_shadow_view)?, pipeline_cache.get_render_pipeline(pipeline.visibility_buffer_hardware_raster)?, pipeline_cache - .get_render_pipeline(pipeline.visibility_buffer_hardware_raster_depth_only)?, + .get_render_pipeline(pipeline.visibility_buffer_hardware_raster_shadow_view)?, pipeline_cache.get_render_pipeline( - pipeline.visibility_buffer_hardware_raster_depth_only_unclipped, + pipeline.visibility_buffer_hardware_raster_shadow_view_unclipped, )?, pipeline_cache.get_render_pipeline(pipeline.resolve_depth)?, pipeline_cache.get_render_pipeline(pipeline.resolve_depth_shadow_view)?, diff --git a/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl b/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl index 3c0cfcf943..eaa4eed6c4 100644 --- a/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl +++ b/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl @@ -1,35 +1,36 @@ #import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT -@group(0) @binding(0) var meshlet_visibility_buffer: array; // Per pixel +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; #else -@group(0) @binding(0) var meshlet_visibility_buffer: array; // Per pixel +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; #endif @group(0) @binding(1) var meshlet_cluster_instance_ids: array; // Per cluster @group(0) @binding(2) var meshlet_instance_material_ids: array; // Per entity instance -var view_width: u32; /// This pass writes out the depth texture. @fragment fn resolve_depth(in: FullscreenVertexOutput) -> @builtin(frag_depth) f32 { - let frag_coord_1d = u32(in.position.y) * view_width + u32(in.position.x); - let visibility = meshlet_visibility_buffer[frag_coord_1d]; + let visibility = textureLoad(meshlet_visibility_buffer, vec2(in.position.xy)).r; #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - return bitcast(u32(visibility >> 32u)); + let depth = u32(visibility >> 32u); #else - return bitcast(visibility); + let depth = visibility; #endif + + if depth == 0u { discard; } + + return bitcast(depth); } /// This pass writes out the material depth texture. 
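// A plain-Rust sketch of the 64-bit value the full-resolution visibility
// buffer stores per pixel (shadow views keep only the 32 depth bits). Packing
// depth into the high bits means `textureAtomicMax` keeps the fragment with
// the greatest depth, and the resolve passes recover depth with
// `visibility >> 32` and the cluster id with `packed_ids >> 7`. Treating the
// low 7 bits as a per-meshlet triangle index is an assumption inferred from
// the shift width, not something this diff states.
fn pack_visibility(depth: f32, packed_ids: u32) -> u64 {
    // Positive f32 bit patterns sort in the same order as the floats
    // themselves, so an integer max on the packed value doubles as a depth test.
    ((depth.to_bits() as u64) << 32) | packed_ids as u64
}

fn unpack_depth(visibility: u64) -> f32 {
    f32::from_bits((visibility >> 32) as u32)
}

fn unpack_cluster_id(visibility: u64) -> u32 {
    (visibility as u32) >> 7
}

fn visibility_packing_demo() {
    let near = pack_visibility(0.75, (42 << 7) | 5);
    let far = pack_visibility(0.25, (7 << 7) | 1);
    // With Bevy's reverse-Z depth, the larger depth value is the closer
    // fragment; a cleared value of zero means "no fragment" and is discarded.
    let resolved = near.max(far);
    assert_eq!(unpack_depth(resolved), 0.75);
    assert_eq!(unpack_cluster_id(resolved), 42);
}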
#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT @fragment fn resolve_material_depth(in: FullscreenVertexOutput) -> @builtin(frag_depth) f32 { - let frag_coord_1d = u32(in.position.y) * view_width + u32(in.position.x); - let visibility = meshlet_visibility_buffer[frag_coord_1d]; + let visibility = textureLoad(meshlet_visibility_buffer, vec2(in.position.xy)).r; let depth = visibility >> 32u; - if depth == 0lu { return 0.0; } + if depth == 0lu { discard; } let cluster_id = u32(visibility) >> 7u; let instance_id = meshlet_cluster_instance_ids[cluster_id]; diff --git a/crates/bevy_pbr/src/meshlet/resource_manager.rs b/crates/bevy_pbr/src/meshlet/resource_manager.rs index b2d6cff11d..9b45d7676a 100644 --- a/crates/bevy_pbr/src/meshlet/resource_manager.rs +++ b/crates/bevy_pbr/src/meshlet/resource_manager.rs @@ -8,7 +8,7 @@ use bevy_core_pipeline::{ }; use bevy_ecs::{ component::Component, - entity::{hash_map::EntityHashMap, Entity}, + entity::{Entity, EntityHashMap}, query::AnyOf, resource::Resource, system::{Commands, Query, Res, ResMut}, @@ -50,10 +50,15 @@ pub struct ResourceManager { // Bind group layouts pub fill_cluster_buffers_bind_group_layout: BindGroupLayout, + pub clear_visibility_buffer_bind_group_layout: BindGroupLayout, + pub clear_visibility_buffer_shadow_view_bind_group_layout: BindGroupLayout, pub culling_bind_group_layout: BindGroupLayout, pub visibility_buffer_raster_bind_group_layout: BindGroupLayout, + pub visibility_buffer_raster_shadow_view_bind_group_layout: BindGroupLayout, pub downsample_depth_bind_group_layout: BindGroupLayout, + pub downsample_depth_shadow_view_bind_group_layout: BindGroupLayout, pub resolve_depth_bind_group_layout: BindGroupLayout, + pub resolve_depth_shadow_view_bind_group_layout: BindGroupLayout, pub resolve_material_depth_bind_group_layout: BindGroupLayout, pub material_shade_bind_group_layout: BindGroupLayout, pub remap_1d_to_2d_dispatch_bind_group_layout: Option, @@ -108,6 +113,21 @@ impl ResourceManager { ), ), ), + clear_visibility_buffer_bind_group_layout: render_device.create_bind_group_layout( + "meshlet_clear_visibility_buffer_bind_group_layout", + &BindGroupLayoutEntries::single( + ShaderStages::COMPUTE, + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::WriteOnly), + ), + ), + clear_visibility_buffer_shadow_view_bind_group_layout: render_device + .create_bind_group_layout( + "meshlet_clear_visibility_buffer_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::single( + ShaderStages::COMPUTE, + texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::WriteOnly), + ), + ), culling_bind_group_layout: render_device.create_bind_group_layout( "meshlet_culling_bind_group_layout", &BindGroupLayoutEntries::sequential( @@ -136,7 +156,34 @@ impl ResourceManager { texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly) }; ( - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + texture_storage_2d( + TextureFormat::R32Float, + StorageTextureAccess::ReadWrite, + ), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + sampler(SamplerBindingType::NonFiltering), + ) + }), + ), + downsample_depth_shadow_view_bind_group_layout: render_device.create_bind_group_layout( + 
"meshlet_downsample_depth_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::sequential(ShaderStages::COMPUTE, { + let write_only_r32float = || { + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly) + }; + ( + texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::ReadOnly), write_only_r32float(), write_only_r32float(), write_only_r32float(), @@ -169,16 +216,45 @@ impl ResourceManager { storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), - storage_buffer_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::Atomic), uniform_buffer::(true), ), ), ), + visibility_buffer_raster_shadow_view_bind_group_layout: render_device + .create_bind_group_layout( + "meshlet_visibility_buffer_raster_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::sequential( + ShaderStages::all(), + ( + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + texture_storage_2d( + TextureFormat::R32Uint, + StorageTextureAccess::Atomic, + ), + uniform_buffer::(true), + ), + ), + ), resolve_depth_bind_group_layout: render_device.create_bind_group_layout( "meshlet_resolve_depth_bind_group_layout", &BindGroupLayoutEntries::single( ShaderStages::FRAGMENT, - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), + ), + ), + resolve_depth_shadow_view_bind_group_layout: render_device.create_bind_group_layout( + "meshlet_resolve_depth_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::single( + ShaderStages::FRAGMENT, + texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::ReadOnly), ), ), resolve_material_depth_bind_group_layout: render_device.create_bind_group_layout( @@ -186,7 +262,7 @@ impl ResourceManager { &BindGroupLayoutEntries::sequential( ShaderStages::FRAGMENT, ( - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), ), @@ -197,7 +273,7 @@ impl ResourceManager { &BindGroupLayoutEntries::sequential( ShaderStages::FRAGMENT, ( - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), @@ -234,7 +310,7 @@ pub struct MeshletViewResources { pub second_pass_candidates_buffer: Buffer, instance_visibility: Buffer, pub dummy_render_target: CachedTexture, - pub visibility_buffer: Buffer, + pub visibility_buffer: CachedTexture, pub visibility_buffer_software_raster_indirect_args_first: Buffer, pub visibility_buffer_software_raster_indirect_args_second: Buffer, pub visibility_buffer_hardware_raster_indirect_args_first: Buffer, @@ -244,12 +320,14 @@ pub struct MeshletViewResources { pub material_depth: Option, pub view_size: UVec2, pub raster_cluster_rightmost_slot: u32, + not_shadow_view: bool, } #[derive(Component)] pub struct MeshletViewBindGroups { pub first_node: Arc, pub fill_cluster_buffers: BindGroup, + pub 
clear_visibility_buffer: BindGroup, pub culling_first: BindGroup, pub culling_second: BindGroup, pub downsample_depth: BindGroup, @@ -377,7 +455,7 @@ pub fn prepare_meshlet_per_frame_resources( let index = instance_index / 32; let bit = instance_index - index * 32; if vec.len() <= index { - vec.extend(iter::repeat(0).take(index - vec.len() + 1)); + vec.extend(iter::repeat_n(0, index - vec.len() + 1)); } vec[index] |= 1 << bit; } @@ -419,18 +497,27 @@ pub fn prepare_meshlet_per_frame_resources( }, ); - let type_size = if not_shadow_view { - size_of::() - } else { - size_of::() - } as u64; - // TODO: Cache - let visibility_buffer = render_device.create_buffer(&BufferDescriptor { - label: Some("meshlet_visibility_buffer"), - size: type_size * (view.viewport.z * view.viewport.w) as u64, - usage: BufferUsages::STORAGE, - mapped_at_creation: false, - }); + let visibility_buffer = texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("meshlet_visibility_buffer"), + size: Extent3d { + width: view.viewport.z, + height: view.viewport.w, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D2, + format: if not_shadow_view { + TextureFormat::R64Uint + } else { + TextureFormat::R32Uint + }, + usage: TextureUsages::STORAGE_ATOMIC | TextureUsages::STORAGE_BINDING, + view_formats: &[], + }, + ); let visibility_buffer_software_raster_indirect_args_first = render_device .create_buffer_with_data(&BufferInitDescriptor { @@ -520,6 +607,7 @@ pub fn prepare_meshlet_per_frame_resources( .then(|| texture_cache.get(&render_device, material_depth)), view_size: view.viewport.zw(), raster_cluster_rightmost_slot: resource_manager.raster_cluster_rightmost_slot, + not_shadow_view, }); } } @@ -577,6 +665,16 @@ pub fn prepare_meshlet_view_bind_groups( &entries, ); + let clear_visibility_buffer = render_device.create_bind_group( + "meshlet_clear_visibility_buffer_bind_group", + if view_resources.not_shadow_view { + &resource_manager.clear_visibility_buffer_bind_group_layout + } else { + &resource_manager.clear_visibility_buffer_shadow_view_bind_group_layout + }, + &BindGroupEntries::single(&view_resources.visibility_buffer.default_view), + ); + let entries = BindGroupEntries::sequential(( cluster_meshlet_ids.as_entire_binding(), meshlet_mesh_manager.meshlet_bounding_spheres.binding(), @@ -638,8 +736,12 @@ pub fn prepare_meshlet_view_bind_groups( let downsample_depth = view_resources.depth_pyramid.create_bind_group( &render_device, "meshlet_downsample_depth_bind_group", - &resource_manager.downsample_depth_bind_group_layout, - view_resources.visibility_buffer.as_entire_binding(), + if view_resources.not_shadow_view { + &resource_manager.downsample_depth_bind_group_layout + } else { + &resource_manager.downsample_depth_shadow_view_bind_group_layout + }, + &view_resources.visibility_buffer.default_view, &resource_manager.depth_pyramid_sampler, ); @@ -656,24 +758,32 @@ pub fn prepare_meshlet_view_bind_groups( resource_manager .software_raster_cluster_count .as_entire_binding(), - view_resources.visibility_buffer.as_entire_binding(), + &view_resources.visibility_buffer.default_view, view_uniforms.clone(), )); let visibility_buffer_raster = render_device.create_bind_group( "meshlet_visibility_raster_buffer_bind_group", - &resource_manager.visibility_buffer_raster_bind_group_layout, + if view_resources.not_shadow_view { + &resource_manager.visibility_buffer_raster_bind_group_layout + } else { + 
&resource_manager.visibility_buffer_raster_shadow_view_bind_group_layout + }, &entries, ); let resolve_depth = render_device.create_bind_group( "meshlet_resolve_depth_bind_group", - &resource_manager.resolve_depth_bind_group_layout, - &BindGroupEntries::single(view_resources.visibility_buffer.as_entire_binding()), + if view_resources.not_shadow_view { + &resource_manager.resolve_depth_bind_group_layout + } else { + &resource_manager.resolve_depth_shadow_view_bind_group_layout + }, + &BindGroupEntries::single(&view_resources.visibility_buffer.default_view), ); let resolve_material_depth = view_resources.material_depth.as_ref().map(|_| { let entries = BindGroupEntries::sequential(( - view_resources.visibility_buffer.as_entire_binding(), + &view_resources.visibility_buffer.default_view, cluster_instance_ids.as_entire_binding(), instance_manager.instance_material_ids.binding().unwrap(), )); @@ -686,7 +796,7 @@ pub fn prepare_meshlet_view_bind_groups( let material_shade = view_resources.material_depth.as_ref().map(|_| { let entries = BindGroupEntries::sequential(( - view_resources.visibility_buffer.as_entire_binding(), + &view_resources.visibility_buffer.default_view, cluster_meshlet_ids.as_entire_binding(), meshlet_mesh_manager.meshlets.binding(), meshlet_mesh_manager.indices.binding(), @@ -738,6 +848,7 @@ pub fn prepare_meshlet_view_bind_groups( commands.entity(view_entity).insert(MeshletViewBindGroups { first_node: Arc::clone(&first_node), fill_cluster_buffers, + clear_visibility_buffer, culling_first, culling_second, downsample_depth, diff --git a/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl b/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl index fb2e090051..3525d38e6d 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl @@ -54,16 +54,13 @@ fn vertex(@builtin(instance_index) instance_index: u32, @builtin(vertex_index) v @fragment fn fragment(vertex_output: VertexOutput) { - let frag_coord_1d = u32(vertex_output.position.y) * u32(view.viewport.z) + u32(vertex_output.position.x); - + let depth = bitcast(vertex_output.position.z); #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - let depth = bitcast(vertex_output.position.z); let visibility = (u64(depth) << 32u) | u64(vertex_output.packed_ids); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], visibility); #else - let depth = bitcast(vertex_output.position.z); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], depth); + let visibility = depth; #endif + textureAtomicMax(meshlet_visibility_buffer, vec2(vertex_output.position.xy), visibility); } fn dummy_vertex() -> VertexOutput { diff --git a/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs b/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs index 7b75f241af..20054d2d2f 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs @@ -9,7 +9,7 @@ use bevy_ecs::{ query::QueryState, world::{FromWorld, World}, }; -use bevy_math::ops; +use bevy_math::{ops, UVec2}; use bevy_render::{ camera::ExtractedCamera, render_graph::{Node, NodeRunError, RenderGraphContext}, @@ -77,6 +77,8 @@ impl Node for MeshletVisibilityBufferRasterPassNode { let Some(( fill_cluster_buffers_pipeline, + clear_visibility_buffer_pipeline, + clear_visibility_buffer_shadow_view_pipeline, culling_first_pipeline, culling_second_pipeline, downsample_depth_first_pipeline, @@ -84,10 
+86,10 @@ impl Node for MeshletVisibilityBufferRasterPassNode { downsample_depth_first_shadow_view_pipeline, downsample_depth_second_shadow_view_pipeline, visibility_buffer_software_raster_pipeline, - visibility_buffer_software_raster_depth_only_pipeline, + visibility_buffer_software_raster_shadow_view_pipeline, visibility_buffer_hardware_raster_pipeline, - visibility_buffer_hardware_raster_depth_only_pipeline, - visibility_buffer_hardware_raster_depth_only_unclipped_pipeline, + visibility_buffer_hardware_raster_shadow_view_pipeline, + visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline, resolve_depth_pipeline, resolve_depth_shadow_view_pipeline, resolve_material_depth_pipeline, @@ -107,11 +109,6 @@ impl Node for MeshletVisibilityBufferRasterPassNode { render_context .command_encoder() .push_debug_group("meshlet_visibility_buffer_raster"); - render_context.command_encoder().clear_buffer( - &meshlet_view_resources.second_pass_candidates_buffer, - 0, - None, - ); if first_node { fill_cluster_buffers_pass( render_context, @@ -120,6 +117,17 @@ impl Node for MeshletVisibilityBufferRasterPassNode { meshlet_view_resources.scene_instance_count, ); } + clear_visibility_buffer_pass( + render_context, + &meshlet_view_bind_groups.clear_visibility_buffer, + clear_visibility_buffer_pipeline, + meshlet_view_resources.view_size, + ); + render_context.command_encoder().clear_buffer( + &meshlet_view_resources.second_pass_candidates_buffer, + 0, + None, + ); cull_pass( "culling_first", render_context, @@ -189,7 +197,6 @@ impl Node for MeshletVisibilityBufferRasterPassNode { resolve_depth( render_context, view_depth.get_attachment(StoreOp::Store), - meshlet_view_resources, meshlet_view_bind_groups, resolve_depth_pipeline, camera, @@ -226,15 +233,21 @@ impl Node for MeshletVisibilityBufferRasterPassNode { let shadow_visibility_buffer_hardware_raster_pipeline = if let LightEntity::Directional { .. 
} = light_type { - visibility_buffer_hardware_raster_depth_only_unclipped_pipeline + visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline } else { - visibility_buffer_hardware_raster_depth_only_pipeline + visibility_buffer_hardware_raster_shadow_view_pipeline }; render_context.command_encoder().push_debug_group(&format!( "meshlet_visibility_buffer_raster: {}", shadow_view.pass_name )); + clear_visibility_buffer_pass( + render_context, + &meshlet_view_bind_groups.clear_visibility_buffer, + clear_visibility_buffer_shadow_view_pipeline, + meshlet_view_resources.view_size, + ); render_context.command_encoder().clear_buffer( &meshlet_view_resources.second_pass_candidates_buffer, 0, @@ -264,7 +277,7 @@ impl Node for MeshletVisibilityBufferRasterPassNode { &meshlet_view_resources.dummy_render_target.default_view, meshlet_view_bind_groups, view_offset, - visibility_buffer_software_raster_depth_only_pipeline, + visibility_buffer_software_raster_shadow_view_pipeline, shadow_visibility_buffer_hardware_raster_pipeline, None, meshlet_view_resources.raster_cluster_rightmost_slot, @@ -301,7 +314,7 @@ impl Node for MeshletVisibilityBufferRasterPassNode { &meshlet_view_resources.dummy_render_target.default_view, meshlet_view_bind_groups, view_offset, - visibility_buffer_software_raster_depth_only_pipeline, + visibility_buffer_software_raster_shadow_view_pipeline, shadow_visibility_buffer_hardware_raster_pipeline, None, meshlet_view_resources.raster_cluster_rightmost_slot, @@ -309,7 +322,6 @@ impl Node for MeshletVisibilityBufferRasterPassNode { resolve_depth( render_context, shadow_view.depth_attachment.get_attachment(StoreOp::Store), - meshlet_view_resources, meshlet_view_bind_groups, resolve_depth_shadow_view_pipeline, camera, @@ -362,6 +374,29 @@ fn fill_cluster_buffers_pass( ); } +// TODO: Replace this with vkCmdClearColorImage once wgpu supports it +fn clear_visibility_buffer_pass( + render_context: &mut RenderContext, + clear_visibility_buffer_bind_group: &BindGroup, + clear_visibility_buffer_pipeline: &ComputePipeline, + view_size: UVec2, +) { + let command_encoder = render_context.command_encoder(); + let mut clear_visibility_buffer_pass = + command_encoder.begin_compute_pass(&ComputePassDescriptor { + label: Some("clear_visibility_buffer"), + timestamp_writes: None, + }); + clear_visibility_buffer_pass.set_pipeline(clear_visibility_buffer_pipeline); + clear_visibility_buffer_pass.set_push_constants(0, bytemuck::bytes_of(&view_size)); + clear_visibility_buffer_pass.set_bind_group(0, clear_visibility_buffer_bind_group, &[]); + clear_visibility_buffer_pass.dispatch_workgroups( + view_size.x.div_ceil(16), + view_size.y.div_ceil(16), + 1, + ); +} + fn cull_pass( label: &'static str, render_context: &mut RenderContext, @@ -478,7 +513,6 @@ fn raster_pass( fn resolve_depth( render_context: &mut RenderContext, depth_stencil_attachment: RenderPassDepthStencilAttachment, - meshlet_view_resources: &MeshletViewResources, meshlet_view_bind_groups: &MeshletViewBindGroups, resolve_depth_pipeline: &RenderPipeline, camera: &ExtractedCamera, @@ -494,11 +528,6 @@ fn resolve_depth( resolve_pass.set_camera_viewport(viewport); } resolve_pass.set_render_pipeline(resolve_depth_pipeline); - resolve_pass.set_push_constants( - ShaderStages::FRAGMENT, - 0, - &meshlet_view_resources.view_size.x.to_le_bytes(), - ); resolve_pass.set_bind_group(0, &meshlet_view_bind_groups.resolve_depth, &[]); resolve_pass.draw(0..3, 0..1); } @@ -532,11 +561,6 @@ fn resolve_material_depth( resolve_pass.set_camera_viewport(viewport); } 
resolve_pass.set_render_pipeline(resolve_material_depth_pipeline); - resolve_pass.set_push_constants( - ShaderStages::FRAGMENT, - 0, - &meshlet_view_resources.view_size.x.to_le_bytes(), - ); resolve_pass.set_bind_group(0, resolve_material_depth_bind_group, &[]); resolve_pass.draw(0..3, 0..1); } diff --git a/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl b/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl index f28645013d..4c56c5874a 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl @@ -104,8 +104,7 @@ struct VertexOutput { /// Load the visibility buffer texture and resolve it into a VertexOutput. fn resolve_vertex_output(frag_coord: vec4) -> VertexOutput { - let frag_coord_1d = u32(frag_coord.y) * u32(view.viewport.z) + u32(frag_coord.x); - let packed_ids = u32(meshlet_visibility_buffer[frag_coord_1d]); // TODO: Might be faster to load the correct u32 directly + let packed_ids = u32(textureLoad(meshlet_visibility_buffer, vec2(frag_coord.xy)).r); let cluster_id = packed_ids >> 7u; let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; var meshlet = meshlets[meshlet_id]; diff --git a/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl b/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl index 941c31f093..60f6f1b3ea 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl @@ -167,16 +167,13 @@ fn rasterize_cluster( } fn write_visibility_buffer_pixel(x: f32, y: f32, z: f32, packed_ids: u32) { - let frag_coord_1d = u32(y * view.viewport.z + x); - + let depth = bitcast(z); #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - let depth = bitcast(z); let visibility = (u64(depth) << 32u) | u64(packed_ids); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], visibility); #else - let depth = bitcast(z); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], depth); + let visibility = depth; #endif + textureAtomicMax(meshlet_visibility_buffer, vec2(u32(x), u32(y)), visibility); } fn edge_function(a: vec2, b: vec2, c: vec2) -> f32 { diff --git a/crates/bevy_pbr/src/parallax.rs b/crates/bevy_pbr/src/parallax.rs index e458f88146..0a847b7c25 100644 --- a/crates/bevy_pbr/src/parallax.rs +++ b/crates/bevy_pbr/src/parallax.rs @@ -1,4 +1,4 @@ -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// The [parallax mapping] method to use to compute depth based on the /// material's [`depth_map`]. @@ -12,6 +12,7 @@ use bevy_reflect::Reflect; /// [`depth_map`]: crate::StandardMaterial::depth_map /// [parallax mapping]: https://en.wikipedia.org/wiki/Parallax_mapping #[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Reflect)] +#[reflect(Default, Clone, PartialEq)] pub enum ParallaxMappingMethod { /// A simple linear interpolation, using a single texture sample. 
/// diff --git a/crates/bevy_pbr/src/pbr_material.rs b/crates/bevy_pbr/src/pbr_material.rs index 48353eb67c..fd1babd8ec 100644 --- a/crates/bevy_pbr/src/pbr_material.rs +++ b/crates/bevy_pbr/src/pbr_material.rs @@ -4,6 +4,7 @@ use bevy_math::{Affine2, Affine3, Mat2, Mat3, Vec2, Vec3, Vec4}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ mesh::MeshVertexBufferLayoutRef, render_asset::RenderAssets, render_resource::*, + texture::GpuImage, }; use bitflags::bitflags; @@ -16,7 +17,7 @@ use crate::{deferred::DEFAULT_PBR_DEFERRED_LIGHTING_PASS_ID, *}; /// [`bevy_render::mesh::Mesh::ATTRIBUTE_UV_1`]. /// The default is [`UvChannel::Uv0`]. #[derive(Reflect, Default, Debug, Clone, PartialEq, Eq)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone, PartialEq)] pub enum UvChannel { #[default] Uv0, @@ -30,9 +31,9 @@ pub enum UvChannel { /// May be created directly from a [`Color`] or an [`Image`]. #[derive(Asset, AsBindGroup, Reflect, Debug, Clone)] #[bind_group_data(StandardMaterialKey)] -#[uniform(0, StandardMaterialUniform)] -#[bindless(16)] -#[reflect(Default, Debug)] +#[data(0, StandardMaterialUniform, binding_array(10))] +#[bindless(index_table(range(0..31)))] +#[reflect(Default, Debug, Clone)] pub struct StandardMaterial { /// The color of the surface of the material before lighting. /// @@ -249,13 +250,13 @@ pub struct StandardMaterial { /// with distortion and blur effects. /// /// - [`Camera3d::screen_space_specular_transmission_steps`](bevy_core_pipeline::core_3d::Camera3d::screen_space_specular_transmission_steps) can be used to enable transmissive objects - /// to be seen through other transmissive objects, at the cost of additional draw calls and texture copies; (Use with caution!) - /// - If a simplified approximation of specular transmission using only environment map lighting is sufficient, consider setting - /// [`Camera3d::screen_space_specular_transmission_steps`](bevy_core_pipeline::core_3d::Camera3d::screen_space_specular_transmission_steps) to `0`. + /// to be seen through other transmissive objects, at the cost of additional draw calls and texture copies; (Use with caution!) + /// - If a simplified approximation of specular transmission using only environment map lighting is sufficient, consider setting + /// [`Camera3d::screen_space_specular_transmission_steps`](bevy_core_pipeline::core_3d::Camera3d::screen_space_specular_transmission_steps) to `0`. /// - If purely diffuse light transmission is needed, (i.e. “translucency”) consider using [`StandardMaterial::diffuse_transmission`] instead, - /// for a much less expensive effect. + /// for a much less expensive effect. /// - Specular transmission is rendered before alpha blending, so any material with [`AlphaMode::Blend`], [`AlphaMode::Premultiplied`], [`AlphaMode::Add`] or [`AlphaMode::Multiply`] - /// won't be visible through specular transmissive materials. + /// won't be visible through specular transmissive materials. #[doc(alias = "refraction")] pub specular_transmission: f32, @@ -436,8 +437,8 @@ pub struct StandardMaterial { /// the [`StandardMaterial::specular_tint_texture`] has no alpha value, it /// may be desirable to pack the values together and supply the same /// texture to both fields. 
- #[texture(27)] - #[sampler(28)] + #[cfg_attr(feature = "pbr_specular_textures", texture(27))] + #[cfg_attr(feature = "pbr_specular_textures", sampler(28))] #[cfg(feature = "pbr_specular_textures")] pub specular_texture: Option>, @@ -457,9 +458,9 @@ pub struct StandardMaterial { /// /// Like the fixed specular tint value, this texture map isn't supported in /// the deferred renderer. + #[cfg_attr(feature = "pbr_specular_textures", texture(29))] + #[cfg_attr(feature = "pbr_specular_textures", sampler(30))] #[cfg(feature = "pbr_specular_textures")] - #[texture(29)] - #[sampler(30)] pub specular_tint_texture: Option>, /// An extra thin translucent layer on top of the main PBR layer. This is @@ -630,7 +631,7 @@ pub struct StandardMaterial { /// /// [`Mesh`]: bevy_render::mesh::Mesh // TODO: include this in reflection somehow (maybe via remote types like serde https://serde.rs/remote-derive.html) - #[reflect(ignore)] + #[reflect(ignore, clone)] pub cull_mode: Option, /// Whether to apply only the base color to this material. diff --git a/crates/bevy_pbr/src/prepass/mod.rs b/crates/bevy_pbr/src/prepass/mod.rs index d1cb99e502..77f874c168 100644 --- a/crates/bevy_pbr/src/prepass/mod.rs +++ b/crates/bevy_pbr/src/prepass/mod.rs @@ -3,11 +3,11 @@ mod prepass_bindings; use crate::{ alpha_mode_pipeline_key, binding_arrays_are_usable, buffer_layout, collect_meshes_for_gpu_building, material_bind_groups::MaterialBindGroupAllocator, - queue_material_meshes, setup_morph_and_skinning_defs, skin, DrawMesh, - EntitySpecializationTicks, Material, MaterialPipeline, MaterialPipelineKey, MeshLayouts, - MeshPipeline, MeshPipelineKey, OpaqueRendererMethod, PreparedMaterial, RenderLightmaps, - RenderMaterialInstances, RenderMeshInstanceFlags, RenderMeshInstances, RenderPhaseType, - SetMaterialBindGroup, SetMeshBindGroup, ShadowView, StandardMaterial, + queue_material_meshes, set_mesh_motion_vector_flags, setup_morph_and_skinning_defs, skin, + DrawMesh, EntitySpecializationTicks, Material, MaterialPipeline, MaterialPipelineKey, + MeshLayouts, MeshPipeline, MeshPipelineKey, OpaqueRendererMethod, PreparedMaterial, + RenderLightmaps, RenderMaterialInstances, RenderMeshInstanceFlags, RenderMeshInstances, + RenderPhaseType, SetMaterialBindGroup, SetMeshBindGroup, ShadowView, StandardMaterial, }; use bevy_app::{App, Plugin, PreUpdate}; use bevy_render::{ @@ -18,8 +18,8 @@ use bevy_render::{ render_resource::binding_types::uniform_buffer, renderer::RenderAdapter, sync_world::RenderEntity, - view::{RenderVisibilityRanges, VISIBILITY_RANGES_STORAGE_BUFFER_COUNT}, - ExtractSchedule, Render, RenderApp, RenderSet, + view::{RenderVisibilityRanges, RetainedViewEntity, VISIBILITY_RANGES_STORAGE_BUFFER_COUNT}, + ExtractSchedule, Render, RenderApp, RenderDebugFlags, RenderSet, }; pub use prepass_bindings::*; @@ -46,7 +46,7 @@ use bevy_render::{ Extract, }; use bevy_transform::prelude::GlobalTransform; -use tracing::error; +use tracing::{error, warn}; #[cfg(feature = "meshlet")] use crate::meshlet::{ @@ -56,10 +56,9 @@ use crate::meshlet::{ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::component::Tick; -use bevy_ecs::entity::EntityHash; use bevy_ecs::system::SystemChangeTick; -use bevy_platform_support::collections::HashMap; -use bevy_render::sync_world::{MainEntity, MainEntityHashMap}; +use bevy_platform::collections::HashMap; +use bevy_render::sync_world::MainEntityHashMap; use bevy_render::view::RenderVisibleEntities; use bevy_render::RenderSet::{PrepareAssets, PrepareResources}; use core::{hash::Hash, marker::PhantomData}; 
@@ -146,11 +145,19 @@ where /// Sets up the prepasses for a [`Material`]. /// /// This depends on the [`PrepassPipelinePlugin`]. -pub struct PrepassPlugin(PhantomData); +pub struct PrepassPlugin { + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, + pub phantom: PhantomData, +} -impl Default for PrepassPlugin { - fn default() -> Self { - Self(Default::default()) +impl PrepassPlugin { + /// Creates a new [`PrepassPlugin`] with the given debug flags. + pub fn new(debug_flags: RenderDebugFlags) -> Self { + PrepassPlugin { + debug_flags, + phantom: PhantomData, + } } } @@ -176,8 +183,10 @@ where ), ) .add_plugins(( - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new( + self.debug_flags, + ), )); } @@ -210,7 +219,8 @@ where .in_set(RenderSet::PrepareMeshes) .after(prepare_assets::>) .after(prepare_assets::) - .after(collect_meshes_for_gpu_building), + .after(collect_meshes_for_gpu_building) + .after(set_mesh_motion_vector_flags), queue_prepass_material_meshes:: .in_set(RenderSet::QueueMeshes) .after(prepare_assets::>) @@ -259,25 +269,34 @@ type PreviousMeshFilter = Or<(With, With)>; pub fn update_mesh_previous_global_transforms( mut commands: Commands, views: Query<&Camera, Or<(With, With)>>, - meshes: Query<(Entity, &GlobalTransform, Option<&PreviousGlobalTransform>), PreviousMeshFilter>, + new_meshes: Query< + (Entity, &GlobalTransform), + (PreviousMeshFilter, Without), + >, + mut meshes: Query<(&GlobalTransform, &mut PreviousGlobalTransform), PreviousMeshFilter>, ) { let should_run = views.iter().any(|camera| camera.is_active); if should_run { - for (entity, transform, old_previous_transform) in &meshes { + for (entity, transform) in &new_meshes { let new_previous_transform = PreviousGlobalTransform(transform.affine()); - // Make sure not to trigger change detection on - // `PreviousGlobalTransform` if the previous transform hasn't - // changed. - if old_previous_transform != Some(&new_previous_transform) { - commands.entity(entity).try_insert(new_previous_transform); - } + commands.entity(entity).try_insert(new_previous_transform); } + meshes.par_iter_mut().for_each(|(transform, mut previous)| { + previous.set_if_neq(PreviousGlobalTransform(transform.affine())); + }); } } #[derive(Resource)] pub struct PrepassPipeline { + pub internal: PrepassPipelineInternal, + pub material_pipeline: MaterialPipeline, +} + +/// Internal fields of the `PrepassPipeline` that don't need the generic bound. +/// This is done as an optimization to avoid recompiling the same code multiple times. +pub struct PrepassPipelineInternal { pub view_layout_motion_vectors: BindGroupLayout, pub view_layout_no_motion_vectors: BindGroupLayout, pub mesh_layouts: MeshLayouts, @@ -286,7 +305,6 @@ pub struct PrepassPipeline { pub prepass_material_fragment_shader: Option>, pub deferred_material_vertex_shader: Option>, pub deferred_material_fragment_shader: Option>, - pub material_pipeline: MaterialPipeline, /// Whether skins will use uniform buffers on account of storage buffers /// being unavailable on this platform. @@ -297,8 +315,6 @@ pub struct PrepassPipeline { /// Whether binding arrays (a.k.a. bindless textures) are usable on the /// current render device.
pub binding_arrays_are_usable: bool, - - _marker: PhantomData, } impl FromWorld for PrepassPipeline { @@ -363,8 +379,7 @@ impl FromWorld for PrepassPipeline { let depth_clip_control_supported = render_device .features() .contains(WgpuFeatures::DEPTH_CLIP_CONTROL); - - PrepassPipeline { + let internal = PrepassPipelineInternal { view_layout_motion_vectors, view_layout_no_motion_vectors, mesh_layouts: mesh_pipeline.mesh_layouts.clone(), @@ -389,11 +404,13 @@ impl FromWorld for PrepassPipeline { ShaderRef::Path(path) => Some(asset_server.load(path)), }, material_layout: M::bind_group_layout(render_device), - material_pipeline: world.resource::>().clone(), skins_use_uniform_buffers: skin::skins_use_uniform_buffers(render_device), depth_clip_control_supported, binding_arrays_are_usable: binding_arrays_are_usable(render_device, render_adapter), - _marker: PhantomData, + }; + PrepassPipeline { + internal, + material_pipeline: world.resource::>().clone(), } } } @@ -409,15 +426,38 @@ where key: Self::Key, layout: &MeshVertexBufferLayoutRef, ) -> Result { - let mut bind_group_layouts = vec![if key - .mesh_key + let mut shader_defs = Vec::new(); + if self.material_pipeline.bindless { + shader_defs.push("BINDLESS".into()); + } + let mut descriptor = self + .internal + .specialize(key.mesh_key, shader_defs, layout)?; + + // This is a bit risky because it's possible to change something that would + // break the prepass but be fine in the main pass. + // Since this api is pretty low-level it doesn't matter that much, but it is a potential issue. + M::specialize(&self.material_pipeline, &mut descriptor, layout, key)?; + + Ok(descriptor) + } +} + +impl PrepassPipelineInternal { + fn specialize( + &self, + mesh_key: MeshPipelineKey, + shader_defs: Vec, + layout: &MeshVertexBufferLayoutRef, + ) -> Result { + let mut shader_defs = shader_defs; + let mut bind_group_layouts = vec![if mesh_key .contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { self.view_layout_motion_vectors.clone() } else { self.view_layout_no_motion_vectors.clone() }]; - let mut shader_defs = Vec::new(); let mut vertex_attributes = Vec::new(); // Let the shader code know that it's running in a prepass pipeline. @@ -428,40 +468,29 @@ where // NOTE: Eventually, it would be nice to only add this when the shaders are overloaded by the Material. // The main limitation right now is that bind group order is hardcoded in shaders. 
bind_group_layouts.push(self.material_layout.clone()); - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] shader_defs.push("WEBGL2".into()); - shader_defs.push("VERTEX_OUTPUT_INSTANCE_INDEX".into()); - - if key.mesh_key.contains(MeshPipelineKey::DEPTH_PREPASS) { + if mesh_key.contains(MeshPipelineKey::DEPTH_PREPASS) { shader_defs.push("DEPTH_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::MAY_DISCARD) { + if mesh_key.contains(MeshPipelineKey::MAY_DISCARD) { shader_defs.push("MAY_DISCARD".into()); } - - let blend_key = key - .mesh_key - .intersection(MeshPipelineKey::BLEND_RESERVED_BITS); + let blend_key = mesh_key.intersection(MeshPipelineKey::BLEND_RESERVED_BITS); if blend_key == MeshPipelineKey::BLEND_PREMULTIPLIED_ALPHA { shader_defs.push("BLEND_PREMULTIPLIED_ALPHA".into()); } if blend_key == MeshPipelineKey::BLEND_ALPHA { shader_defs.push("BLEND_ALPHA".into()); } - if layout.0.contains(Mesh::ATTRIBUTE_POSITION) { shader_defs.push("VERTEX_POSITIONS".into()); vertex_attributes.push(Mesh::ATTRIBUTE_POSITION.at_shader_location(0)); } - // For directional light shadow map views, use unclipped depth via either the native GPU feature, // or emulated by setting depth in the fragment shader for GPUs that don't support it natively. - let emulate_unclipped_depth = key - .mesh_key - .contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) + let emulate_unclipped_depth = mesh_key.contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) && !self.depth_clip_control_supported; if emulate_unclipped_depth { shader_defs.push("UNCLIPPED_DEPTH_ORTHO_EMULATION".into()); @@ -473,123 +502,93 @@ where // https://github.com/bevyengine/bevy/pull/8877 shader_defs.push("PREPASS_FRAGMENT".into()); } - let unclipped_depth = key - .mesh_key - .contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) + let unclipped_depth = mesh_key.contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) && self.depth_clip_control_supported; - if layout.0.contains(Mesh::ATTRIBUTE_UV_0) { shader_defs.push("VERTEX_UVS".into()); shader_defs.push("VERTEX_UVS_A".into()); vertex_attributes.push(Mesh::ATTRIBUTE_UV_0.at_shader_location(1)); } - if layout.0.contains(Mesh::ATTRIBUTE_UV_1) { shader_defs.push("VERTEX_UVS".into()); shader_defs.push("VERTEX_UVS_B".into()); vertex_attributes.push(Mesh::ATTRIBUTE_UV_1.at_shader_location(2)); } - - if key.mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) { + if mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) { shader_defs.push("NORMAL_PREPASS".into()); } - - if key - .mesh_key - .intersects(MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::DEFERRED_PREPASS) + if mesh_key.intersects(MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::DEFERRED_PREPASS) { - vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(3)); shader_defs.push("NORMAL_PREPASS_OR_DEFERRED_PREPASS".into()); + if layout.0.contains(Mesh::ATTRIBUTE_NORMAL) { + shader_defs.push("VERTEX_NORMALS".into()); + vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(3)); + } else if mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) { + warn!( + "The default normal prepass expects the mesh to have vertex normal attributes." 
+ ); + } if layout.0.contains(Mesh::ATTRIBUTE_TANGENT) { shader_defs.push("VERTEX_TANGENTS".into()); vertex_attributes.push(Mesh::ATTRIBUTE_TANGENT.at_shader_location(4)); } } - - if key - .mesh_key + if mesh_key .intersects(MeshPipelineKey::MOTION_VECTOR_PREPASS | MeshPipelineKey::DEFERRED_PREPASS) { shader_defs.push("MOTION_VECTOR_PREPASS_OR_DEFERRED_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { + if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { shader_defs.push("DEFERRED_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::LIGHTMAPPED) { + if mesh_key.contains(MeshPipelineKey::LIGHTMAPPED) { shader_defs.push("LIGHTMAP".into()); } - if key - .mesh_key - .contains(MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING) - { + if mesh_key.contains(MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING) { shader_defs.push("LIGHTMAP_BICUBIC_SAMPLING".into()); } - if layout.0.contains(Mesh::ATTRIBUTE_COLOR) { shader_defs.push("VERTEX_COLORS".into()); vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(7)); } - - if key - .mesh_key - .contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) - { + if mesh_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { shader_defs.push("MOTION_VECTOR_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) { + if mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) { shader_defs.push("HAS_PREVIOUS_SKIN".into()); } - - if key.mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) { + if mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) { shader_defs.push("HAS_PREVIOUS_MORPH".into()); } - - // If bindless mode is on, add a `BINDLESS` define. - if self.material_pipeline.bindless { - shader_defs.push("BINDLESS".into()); - } - if self.binding_arrays_are_usable { shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into()); } - - if key - .mesh_key - .contains(MeshPipelineKey::VISIBILITY_RANGE_DITHER) - { + if mesh_key.contains(MeshPipelineKey::VISIBILITY_RANGE_DITHER) { shader_defs.push("VISIBILITY_RANGE_DITHER".into()); } - - if key.mesh_key.intersects( + if mesh_key.intersects( MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::MOTION_VECTOR_PREPASS | MeshPipelineKey::DEFERRED_PREPASS, ) { shader_defs.push("PREPASS_FRAGMENT".into()); } - let bind_group = setup_morph_and_skinning_defs( &self.mesh_layouts, layout, 5, - &key.mesh_key, + &mesh_key, &mut shader_defs, &mut vertex_attributes, self.skins_use_uniform_buffers, ); bind_group_layouts.insert(1, bind_group); - let vertex_buffer_layout = layout.0.get_layout(&vertex_attributes)?; - // Setup prepass fragment targets - normals in slot 0 (or None if not needed), motion vectors in slot 1 let mut targets = prepass_target_descriptors( - key.mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS), - key.mesh_key - .contains(MeshPipelineKey::MOTION_VECTOR_PREPASS), - key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS), + mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS), + mesh_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS), + mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS), ); if targets.iter().all(Option::is_none) { @@ -603,12 +602,12 @@ where // prepass shader, or we are emulating unclipped depth in the fragment shader. 
let fragment_required = !targets.is_empty() || emulate_unclipped_depth - || (key.mesh_key.contains(MeshPipelineKey::MAY_DISCARD) + || (mesh_key.contains(MeshPipelineKey::MAY_DISCARD) && self.prepass_material_fragment_shader.is_some()); let fragment = fragment_required.then(|| { // Use the fragment shader from the material - let frag_shader_handle = if key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { + let frag_shader_handle = if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { match self.deferred_material_fragment_shader.clone() { Some(frag_shader_handle) => frag_shader_handle, _ => PREPASS_SHADER_HANDLE, @@ -629,7 +628,7 @@ where }); // Use the vertex shader from the material if present - let vert_shader_handle = if key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { + let vert_shader_handle = if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { if let Some(handle) = &self.deferred_material_vertex_shader { handle.clone() } else { @@ -640,8 +639,7 @@ where } else { PREPASS_SHADER_HANDLE }; - - let mut descriptor = RenderPipelineDescriptor { + let descriptor = RenderPipelineDescriptor { vertex: VertexState { shader: vert_shader_handle, entry_point: "vertex".into(), @@ -651,7 +649,7 @@ where fragment, layout: bind_group_layouts, primitive: PrimitiveState { - topology: key.mesh_key.primitive_topology(), + topology: mesh_key.primitive_topology(), strip_index_format: None, front_face: FrontFace::Ccw, cull_mode: None, @@ -676,7 +674,7 @@ where }, }), multisample: MultisampleState { - count: key.mesh_key.msaa_samples(), + count: mesh_key.msaa_samples(), mask: !0, alpha_to_coverage_enabled: false, }, @@ -684,12 +682,6 @@ where label: Some("prepass_pipeline".into()), zero_initialize_workgroup_memory: false, }; - - // This is a bit risky because it's possible to change something that would - // break the prepass but be fine in the main pass. - // Since this api is pretty low-level it doesn't matter that much, but it is a potential issue. - M::specialize(&self.material_pipeline, &mut descriptor, layout, key)?; - Ok(descriptor) } } @@ -774,7 +766,7 @@ pub fn prepare_prepass_view_bind_group( ) { prepass_view_bind_group.no_motion_vectors = Some(render_device.create_bind_group( "prepass_view_no_motion_vectors_bind_group", - &prepass_pipeline.view_layout_no_motion_vectors, + &prepass_pipeline.internal.view_layout_no_motion_vectors, &BindGroupEntries::with_indices(( (0, view_binding.clone()), (1, globals_binding.clone()), @@ -785,7 +777,7 @@ pub fn prepare_prepass_view_bind_group( if let Some(previous_view_uniforms_binding) = previous_view_uniforms.uniforms.binding() { prepass_view_bind_group.motion_vectors = Some(render_device.create_bind_group( "prepass_view_motion_vectors_bind_group", - &prepass_pipeline.view_layout_motion_vectors, + &prepass_pipeline.internal.view_layout_motion_vectors, &BindGroupEntries::with_indices(( (0, view_binding), (1, globals_binding), @@ -797,11 +789,22 @@ pub fn prepare_prepass_view_bind_group( } } +/// Stores the [`SpecializedPrepassMaterialViewPipelineCache`] for each view. #[derive(Resource, Deref, DerefMut)] pub struct SpecializedPrepassMaterialPipelineCache { - // (view_entity, material_entity) -> (tick, pipeline_id) + // view_entity -> view pipeline cache #[deref] - map: HashMap<(MainEntity, MainEntity), (Tick, CachedRenderPipelineId), EntityHash>, + map: HashMap>, + marker: PhantomData, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. 
+#[derive(Deref, DerefMut)] +pub struct SpecializedPrepassMaterialViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, marker: PhantomData, } @@ -814,17 +817,26 @@ impl Default for SpecializedPrepassMaterialPipelineCache { } } -#[derive(Resource, Deref, DerefMut, Default, Clone)] -pub struct ViewKeyPrepassCache(MainEntityHashMap); +impl Default for SpecializedPrepassMaterialViewPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, + } + } +} #[derive(Resource, Deref, DerefMut, Default, Clone)] -pub struct ViewPrepassSpecializationTicks(MainEntityHashMap); +pub struct ViewKeyPrepassCache(HashMap); + +#[derive(Resource, Deref, DerefMut, Default, Clone)] +pub struct ViewPrepassSpecializationTicks(HashMap); pub fn check_prepass_views_need_specialization( mut view_key_cache: ResMut, mut view_specialization_ticks: ResMut, mut views: Query<( - &MainEntity, + &ExtractedView, &Msaa, Option<&DepthPrepass>, Option<&NormalPrepass>, @@ -832,9 +844,7 @@ pub fn check_prepass_views_need_specialization( )>, ticks: SystemChangeTick, ) { - for (view_entity, msaa, depth_prepass, normal_prepass, motion_vector_prepass) in - views.iter_mut() - { + for (view, msaa, depth_prepass, normal_prepass, motion_vector_prepass) in views.iter_mut() { let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples()); if depth_prepass.is_some() { view_key |= MeshPipelineKey::DEPTH_PREPASS; @@ -846,14 +856,14 @@ pub fn check_prepass_views_need_specialization( view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS; } - if let Some(current_key) = view_key_cache.get_mut(view_entity) { + if let Some(current_key) = view_key_cache.get_mut(&view.retained_view_entity) { if *current_key != view_key { - view_key_cache.insert(*view_entity, view_key); - view_specialization_ticks.insert(*view_entity, ticks.this_run()); + view_key_cache.insert(view.retained_view_entity, view_key); + view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run()); } } else { - view_key_cache.insert(*view_entity, view_key); - view_specialization_ticks.insert(*view_entity, ticks.this_run()); + view_key_cache.insert(view.retained_view_entity, view_key); + view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run()); } } } @@ -862,13 +872,12 @@ pub fn specialize_prepass_material_meshes( render_meshes: Res>, render_materials: Res>>, render_mesh_instances: Res, - render_material_instances: Res>, + render_material_instances: Res, render_lightmaps: Res, render_visibility_ranges: Res, material_bind_group_allocator: Res>, view_key_cache: Res, views: Query<( - &MainEntity, &ExtractedView, &RenderVisibleEntities, &Msaa, @@ -907,14 +916,7 @@ pub fn specialize_prepass_material_meshes( M: Material, M::Data: PartialEq + Eq + Hash + Clone, { - for ( - view_entity, - extracted_view, - visible_entities, - msaa, - motion_vector_prepass, - deferred_prepass, - ) in &views + for (extracted_view, visible_entities, msaa, motion_vector_prepass, deferred_prepass) in &views { if !opaque_deferred_render_phases.contains_key(&extracted_view.retained_view_entity) && !alpha_mask_deferred_render_phases.contains_key(&extracted_view.retained_view_entity) @@ -924,15 +926,32 @@ pub fn specialize_prepass_material_meshes( continue; } - let Some(view_key) = view_key_cache.get(view_entity) else { + let Some(view_key) = view_key_cache.get(&extracted_view.retained_view_entity) else { continue; }; + let view_tick = view_specialization_ticks + 
.get(&extracted_view.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(extracted_view.retained_view_entity) + .or_default(); + for (_, visible_entity) in visible_entities.iter::() { - let view_tick = view_specialization_ticks.get(view_entity).unwrap(); + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); - let last_specialized_tick = specialized_material_pipeline_cache - .get(&(*view_entity, *visible_entity)) + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) .map(|(tick, _)| *tick); let needs_specialization = last_specialized_tick.is_none_or(|tick| { view_tick.is_newer_than(tick, ticks.this_run()) @@ -941,20 +960,13 @@ pub fn specialize_prepass_material_meshes( if !needs_specialization { continue; } - - let Some(material_asset_id) = render_material_instances.get(visible_entity) else { - continue; - }; - let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) - else { - continue; - }; - let Some(material) = render_materials.get(*material_asset_id) else { + let Some(material) = render_materials.get(material_asset_id) else { continue; }; let Some(material_bind_group) = material_bind_group_allocator.get(material.binding.group) else { + warn!("Couldn't get bind group for material"); continue; }; let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { @@ -1044,10 +1056,8 @@ pub fn specialize_prepass_material_meshes( } }; - specialized_material_pipeline_cache.insert( - (*view_entity, *visible_entity), - (ticks.this_run(), pipeline_id), - ); + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); } } } @@ -1055,19 +1065,19 @@ pub fn specialize_prepass_material_meshes( pub fn queue_prepass_material_meshes( render_mesh_instances: Res, render_materials: Res>>, - render_material_instances: Res>, + render_material_instances: Res, mesh_allocator: Res, gpu_preprocessing_support: Res, mut opaque_prepass_render_phases: ResMut>, mut alpha_mask_prepass_render_phases: ResMut>, mut opaque_deferred_render_phases: ResMut>, mut alpha_mask_deferred_render_phases: ResMut>, - views: Query<(&MainEntity, &ExtractedView, &RenderVisibleEntities)>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, specialized_material_pipeline_cache: Res>, ) where M::Data: PartialEq + Eq + Hash + Clone, { - for (view_entity, extracted_view, visible_entities) in &views { + for (extracted_view, visible_entities) in &views { let ( mut opaque_phase, mut alpha_mask_phase, @@ -1080,6 +1090,12 @@ pub fn queue_prepass_material_meshes( alpha_mask_deferred_render_phases.get_mut(&extracted_view.retained_view_entity), ); + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(&extracted_view.retained_view_entity) + else { + continue; + }; + // Skip if there's no place to put the mesh. 
if opaque_phase.is_none() && alpha_mask_phase.is_none() @@ -1091,7 +1107,7 @@ pub fn queue_prepass_material_meshes( for (render_entity, visible_entity) in visible_entities.iter::() { let Some((current_change_tick, pipeline_id)) = - specialized_material_pipeline_cache.get(&(*view_entity, *visible_entity)) + view_specialized_material_pipeline_cache.get(visible_entity) else { continue; }; @@ -1109,14 +1125,18 @@ pub fn queue_prepass_material_meshes( continue; } - let Some(material_asset_id) = render_material_instances.get(visible_entity) else { + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { continue; }; let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) else { continue; }; - let Some(material) = render_materials.get(*material_asset_id) else { + let Some(material) = render_materials.get(material_asset_id) else { continue; }; let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); @@ -1145,6 +1165,7 @@ pub fn queue_prepass_material_meshes( asset_id: mesh_instance.mesh_asset_id.into(), }, (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, BinnedRenderPhaseType::mesh( mesh_instance.should_batch(), &gpu_preprocessing_support, @@ -1169,6 +1190,7 @@ pub fn queue_prepass_material_meshes( asset_id: mesh_instance.mesh_asset_id.into(), }, (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, BinnedRenderPhaseType::mesh( mesh_instance.should_batch(), &gpu_preprocessing_support, @@ -1195,6 +1217,7 @@ pub fn queue_prepass_material_meshes( batch_set_key, bin_key, (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, BinnedRenderPhaseType::mesh( mesh_instance.should_batch(), &gpu_preprocessing_support, @@ -1218,6 +1241,7 @@ pub fn queue_prepass_material_meshes( batch_set_key, bin_key, (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, BinnedRenderPhaseType::mesh( mesh_instance.should_batch(), &gpu_preprocessing_support, @@ -1229,20 +1253,6 @@ pub fn queue_prepass_material_meshes( _ => {} } } - - // Remove invalid entities from the bins. 
- if let Some(phase) = opaque_phase { - phase.sweep_old_entities(); - } - if let Some(phase) = alpha_mask_phase { - phase.sweep_old_entities(); - } - if let Some(phase) = opaque_deferred_phase { - phase.sweep_old_entities(); - } - if let Some(phase) = alpha_mask_deferred_phase { - phase.sweep_old_entities(); - } } } diff --git a/crates/bevy_pbr/src/prepass/prepass.wgsl b/crates/bevy_pbr/src/prepass/prepass.wgsl index 26011d609b..52dd9bf201 100644 --- a/crates/bevy_pbr/src/prepass/prepass.wgsl +++ b/crates/bevy_pbr/src/prepass/prepass.wgsl @@ -96,6 +96,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #endif // VERTEX_UVS_B #ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS +#ifdef VERTEX_NORMALS #ifdef SKINNED out.world_normal = skinning::skin_normals(world_from_local, vertex.normal); #else // SKINNED @@ -106,6 +107,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { vertex_no_morph.instance_index ); #endif // SKINNED +#endif // VERTEX_NORMALS #ifdef VERTEX_TANGENTS out.world_tangent = mesh_functions::mesh_tangent_local_to_world( diff --git a/crates/bevy_pbr/src/prepass/prepass_io.wgsl b/crates/bevy_pbr/src/prepass/prepass_io.wgsl index 5f7d8ec071..c3c0e55549 100644 --- a/crates/bevy_pbr/src/prepass/prepass_io.wgsl +++ b/crates/bevy_pbr/src/prepass/prepass_io.wgsl @@ -15,7 +15,9 @@ struct Vertex { #endif #ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS +#ifdef VERTEX_NORMALS @location(3) normal: vec3, +#endif #ifdef VERTEX_TANGENTS @location(4) tangent: vec4, #endif diff --git a/crates/bevy_pbr/src/render/build_indirect_params.wgsl b/crates/bevy_pbr/src/render/build_indirect_params.wgsl index 65cd3833b0..5ca6d4c0cc 100644 --- a/crates/bevy_pbr/src/render/build_indirect_params.wgsl +++ b/crates/bevy_pbr/src/render/build_indirect_params.wgsl @@ -12,7 +12,8 @@ IndirectBatchSet, IndirectParametersIndexed, IndirectParametersNonIndexed, - IndirectParametersMetadata, + IndirectParametersCpuMetadata, + IndirectParametersGpuMetadata, MeshInput } @@ -22,26 +23,30 @@ // Data that we use to generate the indirect parameters. // // The `mesh_preprocess.wgsl` shader emits these. -@group(0) @binding(1) var indirect_parameters_metadata: array; +@group(0) @binding(1) var indirect_parameters_cpu_metadata: + array; + +@group(0) @binding(2) var indirect_parameters_gpu_metadata: + array; // Information about each batch set. // // A *batch set* is a set of meshes that might be multi-drawn together. -@group(0) @binding(2) var indirect_batch_sets: array; +@group(0) @binding(3) var indirect_batch_sets: array; #ifdef INDEXED // The buffer of indirect draw parameters that we generate, and that the GPU // reads to issue the draws. // // This buffer is for indexed meshes. -@group(0) @binding(3) var indirect_parameters: +@group(0) @binding(4) var indirect_parameters: array; #else // INDEXED // The buffer of indirect draw parameters that we generate, and that the GPU // reads to issue the draws. // // This buffer is for non-indexed meshes. -@group(0) @binding(3) var indirect_parameters: +@group(0) @binding(4) var indirect_parameters: array; #endif // INDEXED @@ -51,20 +56,21 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { // Figure out our instance index (i.e. batch index). If this thread doesn't // correspond to any index, bail. let instance_index = global_invocation_id.x; - if (instance_index >= arrayLength(&indirect_parameters_metadata)) { + if (instance_index >= arrayLength(&indirect_parameters_cpu_metadata)) { return; } // Unpack the metadata for this batch. 
- let mesh_index = indirect_parameters_metadata[instance_index].mesh_index; - let base_output_index = indirect_parameters_metadata[instance_index].base_output_index; - let batch_set_index = indirect_parameters_metadata[instance_index].batch_set_index; + let base_output_index = indirect_parameters_cpu_metadata[instance_index].base_output_index; + let batch_set_index = indirect_parameters_cpu_metadata[instance_index].batch_set_index; + let mesh_index = indirect_parameters_gpu_metadata[instance_index].mesh_index; // If we aren't using `multi_draw_indirect_count`, we have a 1:1 fixed // assignment of batches to slots in the indirect parameters buffer, so we // can just use the instance index as the index of our indirect parameters. - let early_instance_count = indirect_parameters_metadata[instance_index].early_instance_count; - let late_instance_count = indirect_parameters_metadata[instance_index].late_instance_count; + let early_instance_count = + indirect_parameters_gpu_metadata[instance_index].early_instance_count; + let late_instance_count = indirect_parameters_gpu_metadata[instance_index].late_instance_count; // If in the early phase, we draw only the early meshes. If in the late // phase, we draw only the late meshes. If in the main phase, draw all the diff --git a/crates/bevy_pbr/src/render/gpu_preprocess.rs b/crates/bevy_pbr/src/render/gpu_preprocess.rs index 26559f9223..912f6192ce 100644 --- a/crates/bevy_pbr/src/render/gpu_preprocess.rs +++ b/crates/bevy_pbr/src/render/gpu_preprocess.rs @@ -13,35 +13,36 @@ use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_core_pipeline::{ core_3d::graph::{Core3d, Node3d}, experimental::mip_generation::ViewDepthPyramid, - prepass::{ - DeferredPrepass, DepthPrepass, PreviousViewData, PreviousViewUniformOffset, - PreviousViewUniforms, - }, + prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms}, }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ component::Component, entity::Entity, prelude::resource_exists, - query::{Has, QueryState, With, Without}, + query::{Has, Or, QueryState, With, Without}, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; +use bevy_render::batching::gpu_preprocessing::{ + GpuPreprocessingMode, IndirectParametersGpuMetadata, UntypedPhaseIndirectParametersBuffers, +}; use bevy_render::{ batching::gpu_preprocessing::{ BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingSupport, - IndirectBatchSet, IndirectParametersBuffers, IndirectParametersIndexed, - IndirectParametersMetadata, IndirectParametersNonIndexed, + IndirectBatchSet, IndirectParametersBuffers, IndirectParametersCpuMetadata, + IndirectParametersIndexed, IndirectParametersNonIndexed, LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem, PreprocessWorkItemBuffers, + UntypedPhaseBatchedInstanceBuffers, }, experimental::occlusion_culling::OcclusionCulling, render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext}, render_resource::{ binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer}, BindGroup, BindGroupEntries, BindGroupLayout, BindingResource, Buffer, BufferBinding, - BufferVec, CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor, + CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor, DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec, 
Shader, ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines, TextureSampleType, UninitBufferVec, @@ -60,14 +61,11 @@ use crate::{ graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform, }; -use super::ViewLightEntities; +use super::{ShadowView, ViewLightEntities}; /// The handle to the `mesh_preprocess.wgsl` compute shader. pub const MESH_PREPROCESS_SHADER_HANDLE: Handle = weak_handle!("c8579292-cf92-43b5-9c5a-ec5bd4e44d12"); -/// The handle to the `mesh_preprocess_types.wgsl` compute shader. -pub const MESH_PREPROCESS_TYPES_SHADER_HANDLE: Handle = - weak_handle!("06f797ef-a106-4098-9a2e-20a73aa182e2"); /// The handle to the `reset_indirect_batch_sets.wgsl` compute shader. pub const RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE: Handle = weak_handle!("045fb176-58e2-4e76-b241-7688d761bb23"); @@ -90,6 +88,12 @@ pub struct GpuMeshPreprocessPlugin { pub use_gpu_instance_buffer_builder: bool, } +/// The render node that clears out the GPU-side indirect metadata buffers. +/// +/// This is only used when indirect drawing is enabled. +#[derive(Default)] +pub struct ClearIndirectParametersMetadataNode; + /// The render node for the first mesh preprocessing pass. /// /// This pass runs a compute shader to cull meshes outside the view frustum (if @@ -130,7 +134,6 @@ pub struct LateGpuPreprocessNode { Without, With, With, - Without, ), >, } @@ -140,7 +143,7 @@ pub struct LateGpuPreprocessNode { /// /// This node runs a compute shader on the output of the /// [`EarlyGpuPreprocessNode`] in order to transform the -/// [`IndirectParametersMetadata`] into properly-formatted +/// [`IndirectParametersGpuMetadata`] into properly-formatted /// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`]. pub struct EarlyPrepassBuildIndirectParametersNode { view_query: QueryState< @@ -148,8 +151,7 @@ pub struct EarlyPrepassBuildIndirectParametersNode { ( Without, Without, - With, - Without, + Or<(With, With)>, ), >, } @@ -160,7 +162,7 @@ pub struct EarlyPrepassBuildIndirectParametersNode { /// /// This node runs a compute shader on the output of the /// [`LateGpuPreprocessNode`] in order to transform the -/// [`IndirectParametersMetadata`] into properly-formatted +/// [`IndirectParametersGpuMetadata`] into properly-formatted /// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`]. pub struct LatePrepassBuildIndirectParametersNode { view_query: QueryState< @@ -168,9 +170,8 @@ pub struct LatePrepassBuildIndirectParametersNode { ( Without, Without, - With, + Or<(With, With)>, With, - Without, ), >, } @@ -181,7 +182,7 @@ pub struct LatePrepassBuildIndirectParametersNode { /// /// This node runs a compute shader on the output of the /// [`EarlyGpuPreprocessNode`] and [`LateGpuPreprocessNode`] in order to -/// transform the [`IndirectParametersMetadata`] into properly-formatted +/// transform the [`IndirectParametersGpuMetadata`] into properly-formatted /// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`]. pub struct MainBuildIndirectParametersNode { view_query: QueryState< @@ -393,8 +394,22 @@ pub enum PhasePreprocessBindGroups { /// The bind groups for the compute shaders that reset indirect draw counts and /// build indirect parameters. -#[derive(Resource)] -pub struct BuildIndirectParametersBindGroups { +/// +/// There's one set of bind groups for each phase. Phases are keyed off their +/// [`core::any::TypeId`].
+#[derive(Resource, Default, Deref, DerefMut)] +pub struct BuildIndirectParametersBindGroups(pub TypeIdMap); + +impl BuildIndirectParametersBindGroups { + /// Creates a new, empty [`BuildIndirectParametersBindGroups`] table. + pub fn new() -> BuildIndirectParametersBindGroups { + Self::default() + } +} + +/// The per-phase set of bind groups for the compute shaders that reset indirect +/// draw counts and build indirect parameters. +pub struct PhaseBuildIndirectParametersBindGroups { /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for /// indexed meshes. reset_indexed_indirect_batch_sets: Option, @@ -434,18 +449,6 @@ impl Plugin for GpuMeshPreprocessPlugin { "build_indirect_params.wgsl", Shader::from_wgsl ); - load_internal_asset!( - app, - BUILD_INDIRECT_PARAMS_SHADER_HANDLE, - "build_indirect_params.wgsl", - Shader::from_wgsl - ); - load_internal_asset!( - app, - BUILD_INDIRECT_PARAMS_SHADER_HANDLE, - "build_indirect_params.wgsl", - Shader::from_wgsl - ); } fn finish(&self, app: &mut App) { @@ -470,13 +473,18 @@ impl Plugin for GpuMeshPreprocessPlugin { ( prepare_preprocess_pipelines.in_set(RenderSet::Prepare), prepare_preprocess_bind_groups - .run_if( - resource_exists::>, - ) + .run_if(resource_exists::>) .in_set(RenderSet::PrepareBindGroups), write_mesh_culling_data_buffer.in_set(RenderSet::PrepareResourcesFlush), ), ) + .add_render_graph_node::( + Core3d, + NodePbr::ClearIndirectParametersMetadata + ) .add_render_graph_node::(Core3d, NodePbr::EarlyGpuPreprocess) .add_render_graph_node::(Core3d, NodePbr::LateGpuPreprocess) .add_render_graph_node::( @@ -494,28 +502,84 @@ impl Plugin for GpuMeshPreprocessPlugin { .add_render_graph_edges( Core3d, ( + NodePbr::ClearIndirectParametersMetadata, NodePbr::EarlyGpuPreprocess, NodePbr::EarlyPrepassBuildIndirectParameters, Node3d::EarlyPrepass, + Node3d::EarlyDeferredPrepass, Node3d::EarlyDownsampleDepth, NodePbr::LateGpuPreprocess, NodePbr::LatePrepassBuildIndirectParameters, Node3d::LatePrepass, + Node3d::LateDeferredPrepass, NodePbr::MainBuildIndirectParameters, - // Shadows don't currently support occlusion culling, so we - // treat shadows as effectively the main phase for our - // purposes. - NodePbr::ShadowPass, + Node3d::StartMainPass, ), - ) - .add_render_graph_edge( + ).add_render_graph_edges( Core3d, - NodePbr::MainBuildIndirectParameters, - Node3d::DeferredPrepass + ( + NodePbr::EarlyPrepassBuildIndirectParameters, + NodePbr::EarlyShadowPass, + Node3d::EarlyDownsampleDepth, + ) + ).add_render_graph_edges( + Core3d, + ( + NodePbr::LatePrepassBuildIndirectParameters, + NodePbr::LateShadowPass, + NodePbr::MainBuildIndirectParameters, + ) ); } } +impl Node for ClearIndirectParametersMetadataNode { + fn run<'w>( + &self, + _: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let Some(indirect_parameters_buffers) = world.get_resource::() + else { + return Ok(()); + }; + + // Clear out each indexed and non-indexed GPU-side buffer. 
+ for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() { + if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer() + { + render_context.command_encoder().clear_buffer( + indexed_gpu_metadata_buffer, + 0, + Some( + phase_indirect_parameters_buffers.indexed.batch_count() as u64 + * size_of::() as u64, + ), + ); + } + + if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer() + { + render_context.command_encoder().clear_buffer( + non_indexed_gpu_metadata_buffer, + 0, + Some( + phase_indirect_parameters_buffers.non_indexed.batch_count() as u64 + * size_of::() as u64, + ), + ); + } + } + + Ok(()) + } +} + impl FromWorld for EarlyGpuPreprocessNode { fn from_world(world: &mut World) -> Self { Self { @@ -538,10 +602,8 @@ impl Node for EarlyGpuPreprocessNode { world: &'w World, ) -> Result<(), NodeRunError> { // Grab the [`BatchedInstanceBuffers`]. - let BatchedInstanceBuffers { - work_item_buffers: ref index_buffers, - .. - } = world.resource::>(); + let batched_instance_buffers = + world.resource::>(); let pipeline_cache = world.resource::(); let preprocess_pipelines = world.resource::(); @@ -583,13 +645,6 @@ impl Node for EarlyGpuPreprocessNode { continue; }; - // Grab the work item buffers for this view. - let Some(phase_work_item_buffers) = index_buffers.get(&view.retained_view_entity) - else { - warn!("The preprocessing index buffer wasn't present"); - continue; - }; - // Select the right pipeline, depending on whether GPU culling is in // use. let maybe_pipeline_id = if no_indirect_drawing { @@ -620,18 +675,26 @@ impl Node for EarlyGpuPreprocessNode { compute_pass.set_pipeline(preprocess_pipeline); // Loop over each render phase. - for (phase_type_id, work_item_buffers) in phase_work_item_buffers { + for (phase_type_id, batched_phase_instance_buffers) in + &batched_instance_buffers.phase_instance_buffers + { + // Grab the work item buffers for this view. + let Some(work_item_buffers) = batched_phase_instance_buffers + .work_item_buffers + .get(&view.retained_view_entity) + else { + continue; + }; + // Fetch the bind group for the render phase. let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else { continue; }; - // If we're drawing indirectly, make sure the mesh preprocessing - // shader has access to the view info it needs to do culling. - let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![]; - if !no_indirect_drawing { - dynamic_offsets.push(view_uniform_offset.offset); - } + // Make sure the mesh preprocessing shader has access to the + // view info it needs to do culling and motion vector + // computation. + let dynamic_offsets = [view_uniform_offset.offset]; // Are we drawing directly or indirectly? match *phase_bind_groups { @@ -775,12 +838,8 @@ impl Node for LateGpuPreprocessNode { world: &'w World, ) -> Result<(), NodeRunError> { // Grab the [`BatchedInstanceBuffers`]. - let BatchedInstanceBuffers { - ref work_item_buffers, - ref late_indexed_indirect_parameters_buffer, - ref late_non_indexed_indirect_parameters_buffer, - .. - } = world.resource::>(); + let batched_instance_buffers = + world.resource::>(); let pipeline_cache = world.resource::(); let preprocess_pipelines = world.resource::(); @@ -795,13 +854,6 @@ impl Node for LateGpuPreprocessNode { // Run the compute passes. for (view, bind_groups, view_uniform_offset) in self.view_query.iter_manual(world) { - // Grab the work item buffers for this view. 
- let Some(phase_work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) - else { - warn!("The preprocessing index buffer wasn't present"); - continue; - }; - let maybe_pipeline_id = preprocess_pipelines .late_gpu_occlusion_culling_preprocess .pipeline_id; @@ -821,7 +873,25 @@ compute_pass.set_pipeline(preprocess_pipeline); - for (phase_type_id, work_item_buffers) in phase_work_item_buffers { + // Loop over each phase. Because we built the phases in parallel, + // each phase has a separate set of instance buffers. + for (phase_type_id, batched_phase_instance_buffers) in + &batched_instance_buffers.phase_instance_buffers + { + let UntypedPhaseBatchedInstanceBuffers { + ref work_item_buffers, + ref late_indexed_indirect_parameters_buffer, + ref late_non_indexed_indirect_parameters_buffer, + .. + } = *batched_phase_instance_buffers; + + // Grab the work item buffers for this view. + let Some(phase_work_item_buffers) = + work_item_buffers.get(&view.retained_view_entity) + else { + continue; + }; + let ( PreprocessWorkItemBuffers::Indirect { gpu_occlusion_culling: @@ -833,14 +903,14 @@ .. }, Some(PhasePreprocessBindGroups::IndirectOcclusionCulling { - late_indexed: ref maybe_late_indexed_bind_group, - late_non_indexed: ref maybe_late_non_indexed_bind_group, + late_indexed: maybe_late_indexed_bind_group, + late_non_indexed: maybe_late_non_indexed_bind_group, .. }), Some(late_indexed_indirect_parameters_buffer), Some(late_non_indexed_indirect_parameters_buffer), ) = ( - work_item_buffers, + phase_work_item_buffers, bind_groups.get(phase_type_id), late_indexed_indirect_parameters_buffer.buffer(), late_non_indexed_indirect_parameters_buffer.buffer(), @@ -1029,57 +1099,71 @@ fn run_build_indirect_parameters_node( return Ok(()); }; - // Build indexed indirect parameters. - if let ( - Some(reset_indexed_indirect_batch_sets_bind_group), - Some(build_indirect_indexed_params_bind_group), - ) = ( - &build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets, - &build_indirect_params_bind_groups.build_indexed_indirect, - ) { - compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline); - compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]); - let workgroup_count = indirect_parameters_buffers - .batch_set_count(true) - .div_ceil(WORKGROUP_SIZE); - if workgroup_count > 0 { - compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + // Loop over each phase. As each phase has a separate set of buffers, we need to + // build indirect parameters individually for each phase. + for (phase_type_id, phase_build_indirect_params_bind_groups) in + build_indirect_params_bind_groups.iter() + { + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffers.get(phase_type_id) + else { + continue; + }; + + // Build indexed indirect parameters.
+ if let ( + Some(reset_indexed_indirect_batch_sets_bind_group), + Some(build_indirect_indexed_params_bind_group), + ) = ( + &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets, + &phase_build_indirect_params_bind_groups.build_indexed_indirect, + ) { + compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline); + compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .batch_set_count(true) + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + + compute_pass.set_pipeline(build_indexed_indirect_params_pipeline); + compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .indexed + .batch_count() + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } } - compute_pass.set_pipeline(build_indexed_indirect_params_pipeline); - compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]); - let workgroup_count = indirect_parameters_buffers - .indexed_batch_count() - .div_ceil(WORKGROUP_SIZE); - if workgroup_count > 0 { - compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); - } - } + // Build non-indexed indirect parameters. + if let ( + Some(reset_non_indexed_indirect_batch_sets_bind_group), + Some(build_indirect_non_indexed_params_bind_group), + ) = ( + &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets, + &phase_build_indirect_params_bind_groups.build_non_indexed_indirect, + ) { + compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline); + compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .batch_set_count(false) + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } - // Build non-indexed indirect parameters. 
- if let ( - Some(reset_non_indexed_indirect_batch_sets_bind_group), - Some(build_indirect_non_indexed_params_bind_group), - ) = ( - &build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets, - &build_indirect_params_bind_groups.build_non_indexed_indirect, - ) { - compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline); - compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]); - let workgroup_count = indirect_parameters_buffers - .batch_set_count(false) - .div_ceil(WORKGROUP_SIZE); - if workgroup_count > 0 { - compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); - } - - compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline); - compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]); - let workgroup_count = indirect_parameters_buffers - .non_indexed_batch_count() - .div_ceil(WORKGROUP_SIZE); - if workgroup_count > 0 { - compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline); + compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .non_indexed + .batch_count() + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } } } @@ -1089,26 +1173,41 @@ fn run_build_indirect_parameters_node( impl PreprocessPipelines { /// Returns true if the preprocessing and indirect parameters pipelines have /// been loaded or false otherwise. - pub(crate) fn pipelines_are_loaded(&self, pipeline_cache: &PipelineCache) -> bool { - self.direct_preprocess.is_loaded(pipeline_cache) - && self - .gpu_frustum_culling_preprocess - .is_loaded(pipeline_cache) - && self - .early_gpu_occlusion_culling_preprocess - .is_loaded(pipeline_cache) - && self - .late_gpu_occlusion_culling_preprocess - .is_loaded(pipeline_cache) - && self - .gpu_frustum_culling_build_indexed_indirect_params - .is_loaded(pipeline_cache) - && self - .gpu_frustum_culling_build_non_indexed_indirect_params - .is_loaded(pipeline_cache) - && self.early_phase.is_loaded(pipeline_cache) - && self.late_phase.is_loaded(pipeline_cache) - && self.main_phase.is_loaded(pipeline_cache) + pub(crate) fn pipelines_are_loaded( + &self, + pipeline_cache: &PipelineCache, + preprocessing_support: &GpuPreprocessingSupport, + ) -> bool { + match preprocessing_support.max_supported_mode { + GpuPreprocessingMode::None => false, + GpuPreprocessingMode::PreprocessingOnly => { + self.direct_preprocess.is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_preprocess + .is_loaded(pipeline_cache) + } + GpuPreprocessingMode::Culling => { + self.direct_preprocess.is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_preprocess + .is_loaded(pipeline_cache) + && self + .early_gpu_occlusion_culling_preprocess + .is_loaded(pipeline_cache) + && self + .late_gpu_occlusion_culling_preprocess + .is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_build_indexed_indirect_params + .is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_build_non_indexed_indirect_params + .is_loaded(pipeline_cache) + && self.early_phase.is_loaded(pipeline_cache) + && self.late_phase.is_loaded(pipeline_cache) + && self.main_phase.is_loaded(pipeline_cache) + } + } } } @@ -1317,6 +1416,11 @@ fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntrie DynamicBindGroupLayoutEntries::new_with_indices( ShaderStages::COMPUTE, ( + // `view` + ( + 0, 
+ uniform_buffer::(/* has_dynamic_offset= */ true), + ), // `current_input` (3, storage_buffer_read_only::(false)), // `previous_input` @@ -1329,7 +1433,7 @@ fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntrie ) } -// Returns the first 3 bind group layout entries shared between all invocations +// Returns the first 4 bind group layout entries shared between all invocations // of the indirect parameters building shader. fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries { DynamicBindGroupLayoutEntries::new_with_indices( @@ -1338,9 +1442,13 @@ fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEn (0, storage_buffer_read_only::(false)), ( 1, - storage_buffer_read_only::(false), + storage_buffer_read_only::(false), ), - (2, storage_buffer::(false)), + ( + 2, + storage_buffer_read_only::(false), + ), + (3, storage_buffer::(false)), ), ) } @@ -1351,21 +1459,23 @@ fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries { // GPU culling bind group parameters are a superset of those in the CPU // culling (direct) shader. preprocess_direct_bind_group_layout_entries().extend_with_indices(( - // `indirect_parameters` + // `indirect_parameters_cpu_metadata` ( 7, - storage_buffer::(/* has_dynamic_offset= */ false), + storage_buffer_read_only::( + /* has_dynamic_offset= */ false, + ), + ), + // `indirect_parameters_gpu_metadata` + ( + 8, + storage_buffer::(/* has_dynamic_offset= */ false), ), // `mesh_culling_data` ( - 8, + 9, storage_buffer_read_only::(/* has_dynamic_offset= */ false), ), - // `view` - ( - 0, - uniform_buffer::(/* has_dynamic_offset= */ true), - ), )) } @@ -1400,6 +1510,7 @@ pub fn prepare_preprocess_pipelines( SpecializedComputePipelines, >, preprocess_pipelines: ResMut, + gpu_preprocessing_support: Res, ) { let preprocess_pipelines = preprocess_pipelines.into_inner(); @@ -1413,22 +1524,25 @@ pub fn prepare_preprocess_pipelines( &mut specialized_preprocess_pipelines, PreprocessPipelineKey::FRUSTUM_CULLING, ); - preprocess_pipelines - .early_gpu_occlusion_culling_preprocess - .prepare( - &pipeline_cache, - &mut specialized_preprocess_pipelines, - PreprocessPipelineKey::FRUSTUM_CULLING - | PreprocessPipelineKey::OCCLUSION_CULLING - | PreprocessPipelineKey::EARLY_PHASE, - ); - preprocess_pipelines - .late_gpu_occlusion_culling_preprocess - .prepare( - &pipeline_cache, - &mut specialized_preprocess_pipelines, - PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING, - ); + + if gpu_preprocessing_support.is_culling_supported() { + preprocess_pipelines + .early_gpu_occlusion_culling_preprocess + .prepare( + &pipeline_cache, + &mut specialized_preprocess_pipelines, + PreprocessPipelineKey::FRUSTUM_CULLING + | PreprocessPipelineKey::OCCLUSION_CULLING + | PreprocessPipelineKey::EARLY_PHASE, + ); + preprocess_pipelines + .late_gpu_occlusion_culling_preprocess + .prepare( + &pipeline_cache, + &mut specialized_preprocess_pipelines, + PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING, + ); + } let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty(); @@ -1458,6 +1572,10 @@ pub fn prepare_preprocess_pipelines( build_indirect_parameters_pipeline_key, ); + if !gpu_preprocessing_support.is_culling_supported() { + return; + } + for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [ ( &mut preprocess_pipelines.early_phase, @@ -1637,18 +1755,14 @@ pub fn 
prepare_preprocess_bind_groups( ) { // Grab the `BatchedInstanceBuffers`. let BatchedInstanceBuffers { - data_buffer: ref data_buffer_vec, - ref work_item_buffers, - current_input_buffer: ref current_input_buffer_vec, - previous_input_buffer: ref previous_input_buffer_vec, - ref late_indexed_indirect_parameters_buffer, - ref late_non_indexed_indirect_parameters_buffer, + current_input_buffer: current_input_buffer_vec, + previous_input_buffer: previous_input_buffer_vec, + phase_instance_buffers, } = batched_instance_buffers.into_inner(); - let (Some(current_input_buffer), Some(previous_input_buffer), Some(data_buffer)) = ( + let (Some(current_input_buffer), Some(previous_input_buffer)) = ( current_input_buffer_vec.buffer().buffer(), previous_input_buffer_vec.buffer().buffer(), - data_buffer_vec.buffer(), ) else { return; }; @@ -1659,22 +1773,39 @@ pub fn prepare_preprocess_bind_groups( // Loop over each view. for (view_entity, view) in &views { - let Some(phase_work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) - else { - continue; - }; - let mut bind_groups = TypeIdMap::default(); // Loop over each phase. - for (&phase_id, work_item_buffers) in phase_work_item_buffers { + for (phase_type_id, phase_instance_buffers) in phase_instance_buffers { + let UntypedPhaseBatchedInstanceBuffers { + data_buffer: ref data_buffer_vec, + ref work_item_buffers, + ref late_indexed_indirect_parameters_buffer, + ref late_non_indexed_indirect_parameters_buffer, + } = *phase_instance_buffers; + + let Some(data_buffer) = data_buffer_vec.buffer() else { + continue; + }; + + // Grab the indirect parameters buffers for this phase. + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffers.get(phase_type_id) + else { + continue; + }; + + let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else { + continue; + }; + // Create the `PreprocessBindGroupBuilder`. let preprocess_bind_group_builder = PreprocessBindGroupBuilder { view: view_entity, late_indexed_indirect_parameters_buffer, late_non_indexed_indirect_parameters_buffer, render_device: &render_device, - indirect_parameters_buffers: &indirect_parameters_buffers, + phase_indirect_parameters_buffers, mesh_culling_data_buffer: &mesh_culling_data_buffer, view_uniforms: &view_uniforms, previous_view_uniforms: &previous_view_uniforms, @@ -1725,7 +1856,7 @@ pub fn prepare_preprocess_bind_groups( // Write that bind group in. if let Some(bind_group) = bind_group { any_indirect = any_indirect || was_indirect; - bind_groups.insert(phase_id, bind_group); + bind_groups.insert(*phase_type_id, bind_group); } } @@ -1764,7 +1895,7 @@ struct PreprocessBindGroupBuilder<'a> { /// The device. render_device: &'a RenderDevice, /// The buffers that store indirect draw parameters. - indirect_parameters_buffers: &'a IndirectParametersBuffers, + phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers, /// The GPU buffer that stores the information needed to cull each mesh. mesh_culling_data_buffer: &'a MeshCullingDataBuffer, /// The GPU buffer that stores information about the view. @@ -1792,7 +1923,7 @@ impl<'a> PreprocessBindGroupBuilder<'a> { /// and GPU occlusion culling are both disabled. 
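(The restructuring above keys every instance buffer, work-item buffer, and indirect-parameters buffer by the render phase's TypeId. A minimal standalone sketch of that per-phase lookup pattern follows; PhaseInstanceBuffers, PhaseIndirectParametersBuffers, and prepare_per_phase are simplified stand-ins for illustration, not the actual Bevy types.)

use std::any::TypeId;
use std::collections::HashMap;

// Simplified stand-ins for the per-phase resources; the real Bevy types carry
// GPU buffers, work-item lists, and indirect-parameter metadata.
struct PhaseInstanceBuffers {
    ready: bool,
}
struct PhaseIndirectParametersBuffers;

// Mirrors the shape of the per-phase loop: visit each phase by `TypeId` and
// skip phases whose buffers aren't available this frame.
fn prepare_per_phase(
    phase_instance_buffers: &HashMap<TypeId, PhaseInstanceBuffers>,
    indirect_parameters_buffers: &HashMap<TypeId, PhaseIndirectParametersBuffers>,
) -> usize {
    let mut prepared = 0;
    for (phase_type_id, instance_buffers) in phase_instance_buffers {
        if !instance_buffers.ready {
            continue;
        }
        if indirect_parameters_buffers.get(phase_type_id).is_none() {
            continue;
        }
        // A real implementation would build this phase's bind groups here.
        prepared += 1;
    }
    prepared
}

(A phase with no prepared buffers this frame simply gets no bind groups; the early continues avoid failing the whole pass.)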
fn create_direct_preprocess_bind_groups( &self, - work_item_buffer: &BufferVec, + work_item_buffer: &RawBufferVec, ) -> Option { // Don't use `as_entire_binding()` here; the shader reads the array // length and the underlying buffer may be longer than the actual size @@ -1807,6 +1938,7 @@ impl<'a> PreprocessBindGroupBuilder<'a> { "preprocess_direct_bind_group", &self.pipelines.direct_preprocess.bind_group_layout, &BindGroupEntries::with_indices(( + (0, self.view_uniforms.uniforms.binding()?), (3, self.current_input_buffer.as_entire_binding()), (4, self.previous_input_buffer.as_entire_binding()), ( @@ -1828,8 +1960,8 @@ impl<'a> PreprocessBindGroupBuilder<'a> { fn create_indirect_occlusion_culling_preprocess_bind_groups( &self, view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>, - indexed_work_item_buffer: &BufferVec, - non_indexed_work_item_buffer: &BufferVec, + indexed_work_item_buffer: &RawBufferVec, + non_indexed_work_item_buffer: &RawBufferVec, gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers, ) -> Option { let GpuOcclusionCullingWorkItemBuffers { @@ -1876,7 +2008,7 @@ impl<'a> PreprocessBindGroupBuilder<'a> { &self, view_depth_pyramid: &ViewDepthPyramid, previous_view_uniform_offset: &PreviousViewUniformOffset, - indexed_work_item_buffer: &BufferVec, + indexed_work_item_buffer: &RawBufferVec, late_indexed_work_item_buffer: &UninitBufferVec, ) -> Option { let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; @@ -1884,13 +2016,19 @@ impl<'a> PreprocessBindGroupBuilder<'a> { let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; match ( - self.indirect_parameters_buffers.indexed_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer(), indexed_work_item_buffer.buffer(), late_indexed_work_item_buffer.buffer(), self.late_indexed_indirect_parameters_buffer.buffer(), ) { ( - Some(indexed_metadata_buffer), + Some(indexed_cpu_metadata_buffer), + Some(indexed_gpu_metadata_buffer), Some(indexed_work_item_gpu_buffer), Some(late_indexed_work_item_gpu_buffer), Some(late_indexed_indirect_parameters_buffer), @@ -1923,8 +2061,9 @@ impl<'a> PreprocessBindGroupBuilder<'a> { }), ), (6, self.data_buffer.as_entire_binding()), - (7, indexed_metadata_buffer.as_entire_binding()), - (8, mesh_culling_data_buffer.as_entire_binding()), + (7, indexed_cpu_metadata_buffer.as_entire_binding()), + (8, indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), (0, view_uniforms_binding.clone()), (10, &view_depth_pyramid.all_mips), ( @@ -1967,7 +2106,7 @@ impl<'a> PreprocessBindGroupBuilder<'a> { &self, view_depth_pyramid: &ViewDepthPyramid, previous_view_uniform_offset: &PreviousViewUniformOffset, - non_indexed_work_item_buffer: &BufferVec, + non_indexed_work_item_buffer: &RawBufferVec, late_non_indexed_work_item_buffer: &UninitBufferVec, ) -> Option { let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; @@ -1975,14 +2114,19 @@ impl<'a> PreprocessBindGroupBuilder<'a> { let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; match ( - self.indirect_parameters_buffers - .non_indexed_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer(), non_indexed_work_item_buffer.buffer(), 
late_non_indexed_work_item_buffer.buffer(), self.late_non_indexed_indirect_parameters_buffer.buffer(), ) { ( - Some(non_indexed_metadata_buffer), + Some(non_indexed_cpu_metadata_buffer), + Some(non_indexed_gpu_metadata_buffer), Some(non_indexed_work_item_gpu_buffer), Some(late_non_indexed_work_item_buffer), Some(late_non_indexed_indirect_parameters_buffer), @@ -2015,8 +2159,9 @@ impl<'a> PreprocessBindGroupBuilder<'a> { }), ), (6, self.data_buffer.as_entire_binding()), - (7, non_indexed_metadata_buffer.as_entire_binding()), - (8, mesh_culling_data_buffer.as_entire_binding()), + (7, non_indexed_cpu_metadata_buffer.as_entire_binding()), + (8, non_indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), (0, view_uniforms_binding.clone()), (10, &view_depth_pyramid.all_mips), ( @@ -2066,12 +2211,18 @@ impl<'a> PreprocessBindGroupBuilder<'a> { let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; match ( - self.indirect_parameters_buffers.indexed_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer(), late_indexed_work_item_buffer.buffer(), self.late_indexed_indirect_parameters_buffer.buffer(), ) { ( - Some(indexed_metadata_buffer), + Some(indexed_cpu_metadata_buffer), + Some(indexed_gpu_metadata_buffer), Some(late_indexed_work_item_gpu_buffer), Some(late_indexed_indirect_parameters_buffer), ) => { @@ -2103,8 +2254,9 @@ impl<'a> PreprocessBindGroupBuilder<'a> { }), ), (6, self.data_buffer.as_entire_binding()), - (7, indexed_metadata_buffer.as_entire_binding()), - (8, mesh_culling_data_buffer.as_entire_binding()), + (7, indexed_cpu_metadata_buffer.as_entire_binding()), + (8, indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), (0, view_uniforms_binding.clone()), (10, &view_depth_pyramid.all_mips), ( @@ -2146,13 +2298,18 @@ impl<'a> PreprocessBindGroupBuilder<'a> { let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; match ( - self.indirect_parameters_buffers - .non_indexed_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer(), late_non_indexed_work_item_buffer.buffer(), self.late_non_indexed_indirect_parameters_buffer.buffer(), ) { ( - Some(non_indexed_metadata_buffer), + Some(non_indexed_cpu_metadata_buffer), + Some(non_indexed_gpu_metadata_buffer), Some(non_indexed_work_item_gpu_buffer), Some(late_non_indexed_indirect_parameters_buffer), ) => { @@ -2184,8 +2341,9 @@ impl<'a> PreprocessBindGroupBuilder<'a> { }), ), (6, self.data_buffer.as_entire_binding()), - (7, non_indexed_metadata_buffer.as_entire_binding()), - (8, mesh_culling_data_buffer.as_entire_binding()), + (7, non_indexed_cpu_metadata_buffer.as_entire_binding()), + (8, non_indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), (0, view_uniforms_binding.clone()), (10, &view_depth_pyramid.all_mips), ( @@ -2218,8 +2376,8 @@ impl<'a> PreprocessBindGroupBuilder<'a> { /// is enabled, but GPU occlusion culling is disabled. 
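(Several entries above avoid as_entire_binding() because the shader derives the array length from the binding, while RawBufferVec overallocates its backing buffer. The sketch below distills that sized-binding pattern; it assumes the wgpu crate, and Metadata is a placeholder for whichever metadata struct the shader actually reads.)

use std::mem::size_of;
use std::num::NonZeroU64;

// Placeholder element type standing in for the real metadata structs.
#[repr(C)]
struct Metadata {
    _words: [u32; 8],
}

// Bind only the first `element_count` elements of an overallocated buffer so
// the array length the shader derives from the binding matches the logical
// length rather than the buffer's full capacity.
fn sized_binding(buffer: &wgpu::Buffer, element_count: u64) -> wgpu::BufferBinding<'_> {
    wgpu::BufferBinding {
        buffer,
        offset: 0,
        size: NonZeroU64::new(element_count * size_of::<Metadata>() as u64),
    }
}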
fn create_indirect_frustum_culling_preprocess_bind_groups( &self, - indexed_work_item_buffer: &BufferVec, - non_indexed_work_item_buffer: &BufferVec, + indexed_work_item_buffer: &RawBufferVec, + non_indexed_work_item_buffer: &RawBufferVec, ) -> Option { Some(PhasePreprocessBindGroups::IndirectFrustumCulling { indexed: self @@ -2234,16 +2392,25 @@ impl<'a> PreprocessBindGroupBuilder<'a> { /// frustum culling is enabled, but GPU occlusion culling is disabled. fn create_indirect_frustum_culling_indexed_bind_group( &self, - indexed_work_item_buffer: &BufferVec, + indexed_work_item_buffer: &RawBufferVec, ) -> Option { let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; match ( - self.indirect_parameters_buffers.indexed_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer(), indexed_work_item_buffer.buffer(), ) { - (Some(indexed_metadata_buffer), Some(indexed_work_item_gpu_buffer)) => { + ( + Some(indexed_cpu_metadata_buffer), + Some(indexed_gpu_metadata_buffer), + Some(indexed_work_item_gpu_buffer), + ) => { // Don't use `as_entire_binding()` here; the shader reads the array // length and the underlying buffer may be longer than the actual size // of the vector. @@ -2272,8 +2439,9 @@ impl<'a> PreprocessBindGroupBuilder<'a> { }), ), (6, self.data_buffer.as_entire_binding()), - (7, indexed_metadata_buffer.as_entire_binding()), - (8, mesh_culling_data_buffer.as_entire_binding()), + (7, indexed_cpu_metadata_buffer.as_entire_binding()), + (8, indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), (0, view_uniforms_binding.clone()), )), ), @@ -2287,17 +2455,25 @@ impl<'a> PreprocessBindGroupBuilder<'a> { /// GPU frustum culling is enabled, but GPU occlusion culling is disabled. fn create_indirect_frustum_culling_non_indexed_bind_group( &self, - non_indexed_work_item_buffer: &BufferVec, + non_indexed_work_item_buffer: &RawBufferVec, ) -> Option { let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; match ( - self.indirect_parameters_buffers - .non_indexed_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer(), non_indexed_work_item_buffer.buffer(), ) { - (Some(non_indexed_metadata_buffer), Some(non_indexed_work_item_gpu_buffer)) => { + ( + Some(non_indexed_cpu_metadata_buffer), + Some(non_indexed_gpu_metadata_buffer), + Some(non_indexed_work_item_gpu_buffer), + ) => { // Don't use `as_entire_binding()` here; the shader reads the array // length and the underlying buffer may be longer than the actual size // of the vector. 
@@ -2326,8 +2502,9 @@ impl<'a> PreprocessBindGroupBuilder<'a> { }), ), (6, self.data_buffer.as_entire_binding()), - (7, non_indexed_metadata_buffer.as_entire_binding()), - (8, mesh_culling_data_buffer.as_entire_binding()), + (7, non_indexed_cpu_metadata_buffer.as_entire_binding()), + (8, non_indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), (0, view_uniforms_binding.clone()), )), ), @@ -2346,121 +2523,168 @@ fn create_build_indirect_parameters_bind_groups( render_device: &RenderDevice, pipelines: &PreprocessPipelines, current_input_buffer: &Buffer, - indirect_parameters_buffer: &IndirectParametersBuffers, + indirect_parameters_buffers: &IndirectParametersBuffers, ) { - commands.insert_resource(BuildIndirectParametersBindGroups { - reset_indexed_indirect_batch_sets: match ( - indirect_parameters_buffer.indexed_batch_sets_buffer(), - ) { - (Some(indexed_batch_sets_buffer),) => Some( - render_device.create_bind_group( - "reset_indexed_indirect_batch_sets_bind_group", - // The early bind group is good for the main phase and late - // phase too. They bind the same buffers. - &pipelines - .early_phase - .reset_indirect_batch_sets - .bind_group_layout, - &BindGroupEntries::sequential((indexed_batch_sets_buffer.as_entire_binding(),)), - ), - ), - _ => None, - }, + let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new(); - reset_non_indexed_indirect_batch_sets: match ( - indirect_parameters_buffer.non_indexed_batch_sets_buffer(), - ) { - (Some(non_indexed_batch_sets_buffer),) => Some( - render_device.create_bind_group( - "reset_non_indexed_indirect_batch_sets_bind_group", - // The early bind group is good for the main phase and late - // phase too. They bind the same buffers. - &pipelines - .early_phase - .reset_indirect_batch_sets - .bind_group_layout, - &BindGroupEntries::sequential(( - non_indexed_batch_sets_buffer.as_entire_binding(), - )), - ), - ), - _ => None, - }, + for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() { + build_indirect_parameters_bind_groups.insert( + *phase_type_id, + PhaseBuildIndirectParametersBindGroups { + reset_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer + .indexed + .batch_sets_buffer(),) + { + (Some(indexed_batch_sets_buffer),) => Some( + render_device.create_bind_group( + "reset_indexed_indirect_batch_sets_bind_group", + // The early bind group is good for the main phase and late + // phase too. They bind the same buffers. + &pipelines + .early_phase + .reset_indirect_batch_sets + .bind_group_layout, + &BindGroupEntries::sequential(( + indexed_batch_sets_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, - build_indexed_indirect: match ( - indirect_parameters_buffer.indexed_metadata_buffer(), - indirect_parameters_buffer.indexed_data_buffer(), - indirect_parameters_buffer.indexed_batch_sets_buffer(), - ) { - ( - Some(indexed_indirect_parameters_metadata_buffer), - Some(indexed_indirect_parameters_data_buffer), - Some(indexed_batch_sets_buffer), - ) => Some( - render_device.create_bind_group( - "build_indexed_indirect_parameters_bind_group", - // The frustum culling bind group is good for occlusion culling - // too. They bind the same buffers. 
- &pipelines - .gpu_frustum_culling_build_indexed_indirect_params - .bind_group_layout, - &BindGroupEntries::sequential(( - current_input_buffer.as_entire_binding(), - // Don't use `as_entire_binding` here; the shader reads - // the length and `RawBufferVec` overallocates. - BufferBinding { - buffer: indexed_indirect_parameters_metadata_buffer, - offset: 0, - size: NonZeroU64::new( - indirect_parameters_buffer.indexed_batch_count() as u64 - * size_of::() as u64, - ), - }, - indexed_batch_sets_buffer.as_entire_binding(), - indexed_indirect_parameters_data_buffer.as_entire_binding(), - )), - ), - ), - _ => None, - }, + reset_non_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer + .non_indexed + .batch_sets_buffer(),) + { + (Some(non_indexed_batch_sets_buffer),) => Some( + render_device.create_bind_group( + "reset_non_indexed_indirect_batch_sets_bind_group", + // The early bind group is good for the main phase and late + // phase too. They bind the same buffers. + &pipelines + .early_phase + .reset_indirect_batch_sets + .bind_group_layout, + &BindGroupEntries::sequential(( + non_indexed_batch_sets_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, - build_non_indexed_indirect: match ( - indirect_parameters_buffer.non_indexed_metadata_buffer(), - indirect_parameters_buffer.non_indexed_data_buffer(), - indirect_parameters_buffer.non_indexed_batch_sets_buffer(), - ) { - ( - Some(non_indexed_indirect_parameters_metadata_buffer), - Some(non_indexed_indirect_parameters_data_buffer), - Some(non_indexed_batch_sets_buffer), - ) => Some( - render_device.create_bind_group( - "build_non_indexed_indirect_parameters_bind_group", - // The frustum culling bind group is good for occlusion culling - // too. They bind the same buffers. - &pipelines - .gpu_frustum_culling_build_non_indexed_indirect_params - .bind_group_layout, - &BindGroupEntries::sequential(( - current_input_buffer.as_entire_binding(), - // Don't use `as_entire_binding` here; the shader reads - // the length and `RawBufferVec` overallocates. - BufferBinding { - buffer: non_indexed_indirect_parameters_metadata_buffer, - offset: 0, - size: NonZeroU64::new( - indirect_parameters_buffer.non_indexed_batch_count() as u64 - * size_of::() as u64, - ), - }, - non_indexed_batch_sets_buffer.as_entire_binding(), - non_indexed_indirect_parameters_data_buffer.as_entire_binding(), - )), - ), - ), - _ => None, - }, - }); + build_indexed_indirect: match ( + phase_indirect_parameters_buffer + .indexed + .cpu_metadata_buffer(), + phase_indirect_parameters_buffer + .indexed + .gpu_metadata_buffer(), + phase_indirect_parameters_buffer.indexed.data_buffer(), + phase_indirect_parameters_buffer.indexed.batch_sets_buffer(), + ) { + ( + Some(indexed_indirect_parameters_cpu_metadata_buffer), + Some(indexed_indirect_parameters_gpu_metadata_buffer), + Some(indexed_indirect_parameters_data_buffer), + Some(indexed_batch_sets_buffer), + ) => Some( + render_device.create_bind_group( + "build_indexed_indirect_parameters_bind_group", + // The frustum culling bind group is good for occlusion culling + // too. They bind the same buffers. + &pipelines + .gpu_frustum_culling_build_indexed_indirect_params + .bind_group_layout, + &BindGroupEntries::sequential(( + current_input_buffer.as_entire_binding(), + // Don't use `as_entire_binding` here; the shader reads + // the length and `RawBufferVec` overallocates. 
+ BufferBinding { + buffer: indexed_indirect_parameters_cpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + BufferBinding { + buffer: indexed_indirect_parameters_gpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + indexed_batch_sets_buffer.as_entire_binding(), + indexed_indirect_parameters_data_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, + + build_non_indexed_indirect: match ( + phase_indirect_parameters_buffer + .non_indexed + .cpu_metadata_buffer(), + phase_indirect_parameters_buffer + .non_indexed + .gpu_metadata_buffer(), + phase_indirect_parameters_buffer.non_indexed.data_buffer(), + phase_indirect_parameters_buffer + .non_indexed + .batch_sets_buffer(), + ) { + ( + Some(non_indexed_indirect_parameters_cpu_metadata_buffer), + Some(non_indexed_indirect_parameters_gpu_metadata_buffer), + Some(non_indexed_indirect_parameters_data_buffer), + Some(non_indexed_batch_sets_buffer), + ) => Some( + render_device.create_bind_group( + "build_non_indexed_indirect_parameters_bind_group", + // The frustum culling bind group is good for occlusion culling + // too. They bind the same buffers. + &pipelines + .gpu_frustum_culling_build_non_indexed_indirect_params + .bind_group_layout, + &BindGroupEntries::sequential(( + current_input_buffer.as_entire_binding(), + // Don't use `as_entire_binding` here; the shader reads + // the length and `RawBufferVec` overallocates. + BufferBinding { + buffer: non_indexed_indirect_parameters_cpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.non_indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + BufferBinding { + buffer: non_indexed_indirect_parameters_gpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.non_indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + non_indexed_batch_sets_buffer.as_entire_binding(), + non_indexed_indirect_parameters_data_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, + }, + ); + } + + commands.insert_resource(build_indirect_parameters_bind_groups); } /// Writes the information needed to do GPU mesh culling to the GPU. 
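(The reset and build dispatches in the gpu_preprocessing hunks above size their dispatches the same way: divide the batch count by the workgroup size, rounding up, and skip the dispatch entirely when the result is zero. A small runnable sketch of just that arithmetic follows; the WORKGROUP_SIZE value here is illustrative and in real code must match the shader's @workgroup_size.)

// Illustrative workgroup size; the actual value must agree with the shader.
const WORKGROUP_SIZE: usize = 64;

fn workgroup_count(batch_count: usize) -> u32 {
    // div_ceil rounds up so a partially filled final workgroup still runs.
    batch_count.div_ceil(WORKGROUP_SIZE) as u32
}

fn main() {
    assert_eq!(workgroup_count(0), 0); // zero batches: the dispatch is skipped
    assert_eq!(workgroup_count(64), 1);
    assert_eq!(workgroup_count(65), 2);
}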
diff --git a/crates/bevy_pbr/src/render/light.rs b/crates/bevy_pbr/src/render/light.rs index f248a17451..d71dccc71a 100644 --- a/crates/bevy_pbr/src/render/light.rs +++ b/crates/bevy_pbr/src/render/light.rs @@ -8,12 +8,17 @@ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::component::Tick; use bevy_ecs::system::SystemChangeTick; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, hash_set::EntityHashSet}, + entity::{EntityHashMap, EntityHashSet}, prelude::*, system::lifetimeless::Read, }; use bevy_math::{ops, Mat4, UVec4, Vec2, Vec3, Vec3Swizzles, Vec4, Vec4Swizzles}; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_platform::hash::FixedHasher; +use bevy_render::experimental::occlusion_culling::{ + OcclusionCulling, OcclusionCullingSubview, OcclusionCullingSubviewEntities, +}; +use bevy_render::sync_world::MainEntityHashMap; use bevy_render::{ batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport}, camera::SortedCameras, @@ -80,6 +85,8 @@ pub struct ExtractedDirectionalLight { pub frusta: EntityHashMap>, pub render_layers: RenderLayers, pub soft_shadow_size: Option, + /// True if this light is using two-phase occlusion culling. + pub occlusion_culling: bool, } // NOTE: These must match the bit flags in bevy_pbr/src/render/mesh_view_types.wgsl! @@ -252,6 +259,7 @@ pub fn extract_lights( &ViewVisibility, Option<&RenderLayers>, Option<&VolumetricLight>, + Has, ), Without, >, @@ -339,7 +347,7 @@ pub fn extract_lights( )); } *previous_point_lights_len = point_lights_values.len(); - commands.insert_or_spawn_batch(point_lights_values); + commands.try_insert_batch(point_lights_values); let mut spot_lights_values = Vec::with_capacity(*previous_spot_lights_len); for entity in global_point_lights.iter().copied() { @@ -402,7 +410,7 @@ pub fn extract_lights( } } *previous_spot_lights_len = spot_lights_values.len(); - commands.insert_or_spawn_batch(spot_lights_values); + commands.try_insert_batch(spot_lights_values); for ( main_entity, @@ -416,6 +424,7 @@ pub fn extract_lights( view_visibility, maybe_layers, volumetric_light, + occlusion_culling, ) in &directional_lights { if !view_visibility.get() { @@ -481,6 +490,7 @@ pub fn extract_lights( cascades: extracted_cascades, frusta: extracted_frusta, render_layers: maybe_layers.unwrap_or_default().clone(), + occlusion_culling, }, RenderCascadesVisibleEntities { entities: cascade_visible_entities, @@ -515,7 +525,7 @@ pub(crate) fn add_light_view_entities( trigger: Trigger, mut commands: Commands, ) { - if let Some(mut v) = commands.get_entity(trigger.target()) { + if let Ok(mut v) = commands.get_entity(trigger.target()) { v.insert(LightViewEntities::default()); } } @@ -525,7 +535,7 @@ pub(crate) fn extracted_light_removed( trigger: Trigger, mut commands: Commands, ) { - if let Some(mut v) = commands.get_entity(trigger.target()) { + if let Ok(mut v) = commands.get_entity(trigger.target()) { v.try_remove::(); } } @@ -538,7 +548,7 @@ pub(crate) fn remove_light_view_entities( if let Ok(entities) = query.get(trigger.target()) { for v in entities.0.values() { for e in v.iter().copied() { - if let Some(mut v) = commands.get_entity(e) { + if let Ok(mut v) = commands.get_entity(e) { v.despawn(); } } @@ -1064,7 +1074,7 @@ pub fn prepare_lights( // NOTE: iOS Simulator is missing CubeArray support so we use Cube instead. // See https://github.com/bevyengine/bevy/pull/12052 - remove if support is added. 
#[cfg(all( - not(feature = "ios_simulator"), + not(target_abi = "sim"), any( not(feature = "webgl"), not(target_arch = "wasm32"), @@ -1073,7 +1083,7 @@ pub fn prepare_lights( ))] dimension: Some(TextureViewDimension::CubeArray), #[cfg(any( - feature = "ios_simulator", + target_abi = "sim", all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")) ))] dimension: Some(TextureViewDimension::Cube), @@ -1146,7 +1156,9 @@ pub fn prepare_lights( .filter_map(|sorted_camera| views.get(sorted_camera.entity).ok()) { live_views.insert(entity); + let mut view_lights = Vec::new(); + let mut view_occlusion_culling_lights = Vec::new(); let gpu_preprocessing_mode = gpu_preprocessing_support.min(if !no_indirect_drawing { GpuPreprocessingMode::Culling @@ -1492,7 +1504,7 @@ pub fn prepare_lights( // NOTE: For point and spotlights, we reuse the same depth attachment for all views. // However, for directional lights, we want a new depth attachment for each view, // so that the view is cleared for each view. - let depth_attachment = DepthAttachment::new(depth_texture_view, Some(0.0)); + let depth_attachment = DepthAttachment::new(depth_texture_view.clone(), Some(0.0)); directional_depth_texture_array_index += 1; @@ -1541,6 +1553,18 @@ pub fn prepare_lights( view_lights.push(view_light_entity); + // If this light is using occlusion culling, add the appropriate components. + if light.occlusion_culling { + commands.entity(view_light_entity).insert(( + OcclusionCulling, + OcclusionCullingSubview { + depth_texture_view, + depth_texture_size: directional_light_shadow_map.size as u32, + }, + )); + view_occlusion_culling_lights.push(view_light_entity); + } + // Subsequent views with the same light entity will **NOT** reuse the same shadow map // (Because the cascades are unique to each view) // TODO: Implement GPU culling for shadow passes. @@ -1564,6 +1588,16 @@ pub fn prepare_lights( offset: view_gpu_lights_writer.write(&gpu_lights), }, )); + + // Make a link from the camera to all shadow cascades with occlusion + // culling enabled. 
+ if !view_occlusion_culling_lights.is_empty() { + commands + .entity(entity) + .insert(OcclusionCullingSubviewEntities( + view_occlusion_culling_lights, + )); + } } // Despawn light-view entities for views that no longer exist @@ -1613,9 +1647,16 @@ pub struct LightSpecializationTicks(HashMap); #[derive(Resource, Deref, DerefMut)] pub struct SpecializedShadowMaterialPipelineCache { - // (view_light_entity, visible_entity) -> (tick, pipeline_id) + // view light entity -> view pipeline cache #[deref] - map: HashMap<(RetainedViewEntity, MainEntity), (Tick, CachedRenderPipelineId)>, + map: HashMap>, + marker: PhantomData, +} + +#[derive(Deref, DerefMut)] +pub struct SpecializedShadowMaterialViewPipelineCache { + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, marker: PhantomData, } @@ -1628,6 +1669,15 @@ impl Default for SpecializedShadowMaterialPipelineCache { } } +impl Default for SpecializedShadowMaterialViewPipelineCache { + fn default() -> Self { + Self { + map: MainEntityHashMap::default(), + marker: PhantomData, + } + } +} + pub fn check_views_lights_need_specialization( view_lights: Query<&ViewLightEntities, With>, view_light_entities: Query<(&LightEntity, &ExtractedView)>, @@ -1679,7 +1729,7 @@ pub fn specialize_shadows( Res>, Res, Res>>, - Res>, + Res, Res>, ), shadow_render_phases: Res>, @@ -1702,6 +1752,10 @@ pub fn specialize_shadows( ) where M::Data: PartialEq + Eq + Hash + Clone, { + // Record the retained IDs of all shadow views so that we can expire old + // pipeline IDs. + let mut all_shadow_views: HashSet = HashSet::default(); + for (entity, view_lights) in &view_lights { for view_light_entity in view_lights.lights.iter().copied() { let Ok((light_entity, extracted_view_light)) = @@ -1709,6 +1763,9 @@ pub fn specialize_shadows( else { continue; }; + + all_shadow_views.insert(extracted_view_light.retained_view_entity); + if !shadow_render_phases.contains_key(&extracted_view_light.retained_view_entity) { continue; } @@ -1744,13 +1801,30 @@ pub fn specialize_shadows( // NOTE: Lights with shadow mapping disabled will have no visible entities // so no meshes will be queued + let view_tick = light_specialization_ticks + .get(&extracted_view_light.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(extracted_view_light.retained_view_entity) + .or_default(); + for (_, visible_entity) in visible_entities.iter().copied() { - let view_tick = light_specialization_ticks - .get(&extracted_view_light.retained_view_entity) - .unwrap(); + let Some(material_instances) = + render_material_instances.instances.get(&visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instances.asset_id.try_typed::() else { + continue; + }; + let Some(mesh_instance) = + render_mesh_instances.render_mesh_queue_data(visible_entity) + else { + continue; + }; let entity_tick = entity_specialization_ticks.get(&visible_entity).unwrap(); - let last_specialized_tick = specialized_material_pipeline_cache - .get(&(extracted_view_light.retained_view_entity, visible_entity)) + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(&visible_entity) .map(|(tick, _)| *tick); let needs_specialization = last_specialized_tick.is_none_or(|tick| { view_tick.is_newer_than(tick, ticks.this_run()) @@ -1759,10 +1833,7 @@ pub fn specialize_shadows( if !needs_specialization { continue; } - - let Some(mesh_instance) = - render_mesh_instances.render_mesh_queue_data(visible_entity) - else { + let 
Some(material) = render_materials.get(material_asset_id) else { continue; }; if !mesh_instance @@ -1771,12 +1842,6 @@ pub fn specialize_shadows( { continue; } - let Some(material_asset_id) = render_material_instances.get(&visible_entity) else { - continue; - }; - let Some(material) = render_materials.get(*material_asset_id) else { - continue; - }; let Some(material_bind_group) = material_bind_group_allocator.get(material.binding.group) else { @@ -1829,13 +1894,14 @@ pub fn specialize_shadows( } }; - specialized_material_pipeline_cache.insert( - (extracted_view_light.retained_view_entity, visible_entity), - (ticks.this_run(), pipeline_id), - ); + view_specialized_material_pipeline_cache + .insert(visible_entity, (ticks.this_run(), pipeline_id)); } } } + + // Delete specialized pipelines belonging to views that have expired. + specialized_material_pipeline_cache.retain(|view, _| all_shadow_views.contains(view)); } /// For each shadow cascade, iterates over all the meshes "visible" from it and @@ -1845,7 +1911,7 @@ pub fn queue_shadows( shadow_draw_functions: Res>, render_mesh_instances: Res, render_materials: Res>>, - render_material_instances: Res>, + render_material_instances: Res, mut shadow_render_phases: ResMut>, gpu_preprocessing_support: Res, mesh_allocator: Res, @@ -1875,6 +1941,12 @@ pub fn queue_shadows( continue; }; + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(&extracted_view_light.retained_view_entity) + else { + continue; + }; + let visible_entities = match light_entity { LightEntity::Directional { light_entity, @@ -1900,8 +1972,8 @@ pub fn queue_shadows( }; for (entity, main_entity) in visible_entities.iter().copied() { - let Some((current_change_tick, pipeline_id)) = specialized_material_pipeline_cache - .get(&(extracted_view_light.retained_view_entity, main_entity)) + let Some((current_change_tick, pipeline_id)) = + view_specialized_material_pipeline_cache.get(&main_entity) else { continue; }; @@ -1922,10 +1994,14 @@ pub fn queue_shadows( continue; } - let Some(material_asset_id) = render_material_instances.get(&main_entity) else { + let Some(material_instance) = render_material_instances.instances.get(&main_entity) + else { continue; }; - let Some(material) = render_materials.get(*material_asset_id) else { + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { + continue; + }; + let Some(material) = render_materials.get(material_asset_id) else { continue; }; @@ -1946,6 +2022,7 @@ pub fn queue_shadows( asset_id: mesh_instance.mesh_asset_id.into(), }, (entity, main_entity), + mesh_instance.current_uniform_index, BinnedRenderPhaseType::mesh( mesh_instance.should_batch(), &gpu_preprocessing_support, @@ -1953,9 +2030,6 @@ pub fn queue_shadows( *current_change_tick, ); } - - // Remove invalid entities from the bins. - shadow_phase.sweep_old_entities(); } } } @@ -2081,13 +2155,44 @@ impl CachedRenderPipelinePhaseItem for Shadow { } } +/// The rendering node that renders meshes that were "visible" (so to speak) +/// from a light last frame. +/// +/// If occlusion culling for a light is disabled, then this node simply renders +/// all meshes in range of the light. +#[derive(Deref, DerefMut)] +pub struct EarlyShadowPassNode(ShadowPassNode); + +/// The rendering node that renders meshes that became newly "visible" (so to +/// speak) from a light this frame. +/// +/// If occlusion culling for a light is disabled, then this node does nothing. 
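(The early and late shadow pass nodes introduced below are thin wrappers over one shared implementation, distinguished only by the is_late flag they pass through, and the late pass skips lights that are not using occlusion culling. A simplified standalone sketch of that delegation pattern follows; SharedShadowPass, EarlyPass, and LatePass are illustrative stand-ins, not the real render graph node types.)

// Stand-in for the shared shadow pass logic.
struct SharedShadowPass;

impl SharedShadowPass {
    fn run(&self, is_late: bool, light_uses_occlusion_culling: bool) {
        // The late pass only does work for lights using occlusion culling.
        if is_late && !light_uses_occlusion_culling {
            return;
        }
        // ... encode the shadow render pass here ...
    }
}

// Thin wrappers that would be registered as separate graph nodes.
struct EarlyPass(SharedShadowPass);
struct LatePass(SharedShadowPass);

impl EarlyPass {
    fn run(&self, occlusion_culling: bool) {
        self.0.run(false, occlusion_culling);
    }
}

impl LatePass {
    fn run(&self, occlusion_culling: bool) {
        self.0.run(true, occlusion_culling);
    }
}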
+#[derive(Deref, DerefMut)] +pub struct LateShadowPassNode(ShadowPassNode); + +/// Encapsulates rendering logic shared between the early and late shadow pass +/// nodes. pub struct ShadowPassNode { + /// The query that finds cameras in which shadows are visible. main_view_query: QueryState>, - view_light_query: QueryState<(Read, Read)>, + /// The query that finds shadow cascades. + view_light_query: QueryState<(Read, Read, Has)>, } -impl ShadowPassNode { - pub fn new(world: &mut World) -> Self { +impl FromWorld for EarlyShadowPassNode { + fn from_world(world: &mut World) -> Self { + Self(ShadowPassNode::from_world(world)) + } +} + +impl FromWorld for LateShadowPassNode { + fn from_world(world: &mut World) -> Self { + Self(ShadowPassNode::from_world(world)) + } +} + +impl FromWorld for ShadowPassNode { + fn from_world(world: &mut World) -> Self { Self { main_view_query: QueryState::new(world), view_light_query: QueryState::new(world), @@ -2095,10 +2200,9 @@ impl ShadowPassNode { } } -impl Node for ShadowPassNode { +impl Node for EarlyShadowPassNode { fn update(&mut self, world: &mut World) { - self.main_view_query.update_archetypes(world); - self.view_light_query.update_archetypes(world); + self.0.update(world); } fn run<'w>( @@ -2107,25 +2211,61 @@ impl Node for ShadowPassNode { render_context: &mut RenderContext<'w>, world: &'w World, ) -> Result<(), NodeRunError> { - let diagnostics = render_context.diagnostic_recorder(); + self.0.run(graph, render_context, world, false) + } +} - let view_entity = graph.view_entity(); +impl Node for LateShadowPassNode { + fn update(&mut self, world: &mut World) { + self.0.update(world); + } + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + self.0.run(graph, render_context, world, true) + } +} + +impl ShadowPassNode { + fn update(&mut self, world: &mut World) { + self.main_view_query.update_archetypes(world); + self.view_light_query.update_archetypes(world); + } + + /// Runs the node logic. + /// + /// `is_late` is true if this is the late shadow pass or false if this is + /// the early shadow pass. + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + is_late: bool, + ) -> Result<(), NodeRunError> { let Some(shadow_render_phases) = world.get_resource::>() else { return Ok(()); }; - let time_span = diagnostics.time_span(render_context.command_encoder(), "shadows"); - - if let Ok(view_lights) = self.main_view_query.get_manual(world, view_entity) { + if let Ok(view_lights) = self.main_view_query.get_manual(world, graph.view_entity()) { for view_light_entity in view_lights.lights.iter().copied() { - let Ok((view_light, extracted_light_view)) = + let Ok((view_light, extracted_light_view, occlusion_culling)) = self.view_light_query.get_manual(world, view_light_entity) else { continue; }; + // There's no need for a late shadow pass if the light isn't + // using occlusion culling. 
+ if is_late && !occlusion_culling { + continue; + } + let Some(shadow_phase) = shadow_render_phases.get(&extracted_light_view.retained_view_entity) else { @@ -2169,8 +2309,6 @@ impl Node for ShadowPassNode { } } - time_span.end(render_context.command_encoder()); - Ok(()) } } diff --git a/crates/bevy_pbr/src/render/mesh.rs b/crates/bevy_pbr/src/render/mesh.rs index e716eb166e..4bae79b807 100644 --- a/crates/bevy_pbr/src/render/mesh.rs +++ b/crates/bevy_pbr/src/render/mesh.rs @@ -1,6 +1,6 @@ use crate::material_bind_groups::{MaterialBindGroupIndex, MaterialBindGroupSlot}; use allocator::MeshAllocator; -use bevy_asset::{load_internal_asset, AssetId, UntypedAssetId}; +use bevy_asset::{load_internal_asset, AssetId}; use bevy_core_pipeline::{ core_3d::{AlphaMask3d, Opaque3d, Transmissive3d, Transparent3d, CORE_3D_DEPTH_FORMAT}, deferred::{AlphaMask3dDeferred, Opaque3dDeferred}, @@ -8,45 +8,47 @@ use bevy_core_pipeline::{ prepass::MotionVectorPrepass, }; use bevy_derive::{Deref, DerefMut}; +use bevy_diagnostic::FrameCount; use bevy_ecs::{ prelude::*, - query::ROQueryItem, + query::{QueryData, ROQueryItem}, system::{lifetimeless::*, SystemParamItem, SystemState}, }; use bevy_image::{BevyDefault, ImageSampler, TextureFormatPixelInfo}; use bevy_math::{Affine3, Rect, UVec2, Vec3, Vec4}; -use bevy_platform_support::collections::{hash_map::Entry, HashMap}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_render::{ batching::{ gpu_preprocessing::{ self, GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers, - IndirectParametersIndexed, IndirectParametersMetadata, IndirectParametersNonIndexed, - InstanceInputUniformBuffer, + IndirectParametersCpuMetadata, IndirectParametersIndexed, IndirectParametersNonIndexed, + InstanceInputUniformBuffer, UntypedPhaseIndirectParametersBuffers, }, no_gpu_preprocessing, GetBatchData, GetFullBatchData, NoAutomaticBatching, }, camera::Camera, - mesh::*, + mesh::{skinning::SkinnedMesh, *}, primitives::Aabb, render_asset::RenderAssets, render_phase::{ - BinnedRenderPhasePlugin, PhaseItem, PhaseItemExtraIndex, RenderCommand, + BinnedRenderPhasePlugin, InputUniformIndex, PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, SortedRenderPhasePlugin, TrackedRenderPass, }, render_resource::*, renderer::{RenderAdapter, RenderDevice, RenderQueue}, - texture::DefaultImageSampler, + sync_world::MainEntityHashSet, + texture::{DefaultImageSampler, GpuImage}, view::{ - self, NoFrustumCulling, NoIndirectDrawing, RenderVisibilityRanges, ViewTarget, - ViewUniformOffset, ViewVisibility, VisibilityRange, + self, NoFrustumCulling, NoIndirectDrawing, RenderVisibilityRanges, RetainedViewEntity, + ViewTarget, ViewUniformOffset, ViewVisibility, VisibilityRange, }, Extract, }; use bevy_transform::components::GlobalTransform; -use bevy_utils::{default, Parallel}; +use bevy_utils::{default, Parallel, TypeIdMap}; +use core::any::TypeId; use core::mem::size_of; use material_bind_groups::MaterialBindingId; -use render::skin::{self, SkinIndex}; use tracing::{error, warn}; use self::irradiance_volume::IRRADIANCE_VOLUMES_ARE_USABLE; @@ -79,13 +81,24 @@ use smallvec::{smallvec, SmallVec}; use static_assertions::const_assert_eq; /// Provides support for rendering 3D meshes. -#[derive(Default)] pub struct MeshRenderPlugin { /// Whether we're building [`MeshUniform`]s on GPU. /// /// This requires compute shader support and so will be forcibly disabled if /// the platform doesn't support those. 
pub use_gpu_instance_buffer_builder: bool, + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} + +impl MeshRenderPlugin { + /// Creates a new [`MeshRenderPlugin`] with the given debug flags. + pub fn new(debug_flags: RenderDebugFlags) -> MeshRenderPlugin { + MeshRenderPlugin { + use_gpu_instance_buffer_builder: false, + debug_flags, + } + } } pub const FORWARD_IO_HANDLE: Handle = weak_handle!("38111de1-6e35-4dbb-877b-7b6f9334baf6"); @@ -166,26 +179,26 @@ impl Plugin for MeshRenderPlugin { (no_automatic_skin_batching, no_automatic_morph_batching), ) .add_plugins(( - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - SortedRenderPhasePlugin::::default(), - SortedRenderPhasePlugin::::default(), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + SortedRenderPhasePlugin::::new(self.debug_flags), + SortedRenderPhasePlugin::::new(self.debug_flags), )); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app - .init_resource::() - .init_resource::() .init_resource::() .init_resource::() .init_resource::() - .init_resource::() + .init_resource::() .configure_sets( ExtractSchedule, - ExtractMeshesSet.after(view::extract_visibility_ranges), + ExtractMeshesSet + .after(view::extract_visibility_ranges) + .after(late_sweep_material_instances), ) .add_systems( ExtractSchedule, @@ -202,7 +215,7 @@ impl Plugin for MeshRenderPlugin { set_mesh_motion_vector_flags.in_set(RenderSet::PrepareMeshes), prepare_skins.in_set(RenderSet::PrepareResources), prepare_morphs.in_set(RenderSet::PrepareResources), - prepare_mesh_bind_group.in_set(RenderSet::PrepareBindGroups), + prepare_mesh_bind_groups.in_set(RenderSet::PrepareBindGroups), prepare_mesh_view_bind_groups .in_set(RenderSet::PrepareBindGroups) .after(prepare_oit_buffers), @@ -238,12 +251,15 @@ impl Plugin for MeshRenderPlugin { if use_gpu_instance_buffer_builder { render_app - .init_resource::>() + .init_resource::>() .init_resource::() + .init_resource::() .add_systems( ExtractSchedule, - extract_meshes_for_gpu_building - .in_set(ExtractMeshesSet), + extract_meshes_for_gpu_building.in_set(ExtractMeshesSet), ) .add_systems( Render, @@ -305,16 +321,15 @@ impl Plugin for MeshRenderPlugin { } #[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] -pub struct ViewKeyCache(MainEntityHashMap); +pub struct ViewKeyCache(HashMap); #[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] -pub struct ViewSpecializationTicks(MainEntityHashMap); +pub struct ViewSpecializationTicks(HashMap); pub fn check_views_need_specialization( mut view_key_cache: ResMut, mut view_specialization_ticks: ResMut, mut views: Query<( - &MainEntity, &ExtractedView, &Msaa, Option<&Tonemapping>, @@ -340,7 +355,6 @@ pub fn check_views_need_specialization( ticks: SystemChangeTick, ) { for ( - view_entity, view, msaa, tonemapping, @@ -432,11 +446,11 @@ pub fn check_views_need_specialization( ); } if !view_key_cache - .get_mut(view_entity) + .get_mut(&view.retained_view_entity) .is_some_and(|current_key| *current_key == view_key) { - view_key_cache.insert(*view_entity, view_key); - view_specialization_ticks.insert(*view_entity, ticks.this_run()); + 
view_key_cache.insert(view.retained_view_entity, view_key); + view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run()); } } } @@ -478,13 +492,15 @@ pub struct MeshUniform { pub first_vertex_index: u32, /// The current skin index, or `u32::MAX` if there's no skin. pub current_skin_index: u32, - /// The previous skin index, or `u32::MAX` if there's no previous skin. - pub previous_skin_index: u32, /// The material and lightmap indices, packed into 32 bits. /// /// Low 16 bits: index of the material inside the bind group data. /// High 16 bits: index of the lightmap in the binding array. pub material_and_lightmap_bind_group_slot: u32, + /// User supplied tag to identify this mesh instance. + pub tag: u32, + /// Padding. + pub pad: u32, } /// Information that has to be transferred from CPU to GPU in order to produce @@ -534,17 +550,23 @@ pub struct MeshInputUniform { pub index_count: u32, /// The current skin index, or `u32::MAX` if there's no skin. pub current_skin_index: u32, - /// The previous skin index, or `u32::MAX` if there's no previous skin. - pub previous_skin_index: u32, /// The material and lightmap indices, packed into 32 bits. /// /// Low 16 bits: index of the material inside the bind group data. /// High 16 bits: index of the lightmap in the binding array. pub material_and_lightmap_bind_group_slot: u32, + /// The number of the frame on which this [`MeshInputUniform`] was built. + /// + /// This is used to validate the previous transform and skin. If this + /// [`MeshInputUniform`] wasn't updated on this frame, then we know that + /// neither this mesh's transform nor that of its joints have been updated + /// on this frame, and therefore the transforms of both this mesh and its + /// joints must be identical to those for the previous frame. + pub timestamp: u32, + /// User supplied tag to identify this mesh instance. + pub tag: u32, /// Padding. - pub pad_a: u32, - /// Padding. - pub pad_b: u32, + pub pad: u32, } /// Information about each mesh instance needed to cull it on GPU. @@ -577,7 +599,7 @@ impl MeshUniform { material_bind_group_slot: MaterialBindGroupSlot, maybe_lightmap: Option<(LightmapSlotIndex, Rect)>, current_skin_index: Option, - previous_skin_index: Option, + tag: Option, ) -> Self { let (local_from_world_transpose_a, local_from_world_transpose_b) = mesh_transforms.world_from_local.inverse_transpose_3x3(); @@ -595,9 +617,10 @@ impl MeshUniform { flags: mesh_transforms.flags, first_vertex_index, current_skin_index: current_skin_index.unwrap_or(u32::MAX), - previous_skin_index: previous_skin_index.unwrap_or(u32::MAX), material_and_lightmap_bind_group_slot: u32::from(material_bind_group_slot) | ((lightmap_bind_group_slot as u32) << 16), + tag: tag.unwrap_or(0), + pad: 0, } } } @@ -729,6 +752,8 @@ pub struct RenderMeshInstanceShared { /// Index of the slab that the lightmap resides in, if a lightmap is /// present. pub lightmap_slab_index: Option, + /// User supplied tag to identify this mesh instance. + pub tag: u32, } /// Information that is gathered during the parallel portion of mesh extraction @@ -804,10 +829,18 @@ pub enum RenderMeshInstanceGpuQueue { #[derive(Resource, Default, Deref, DerefMut)] pub struct RenderMeshInstanceGpuQueues(Parallel); +/// Holds a list of meshes that couldn't be extracted this frame because their +/// materials weren't prepared yet. +/// +/// On subsequent frames, we try to reextract those meshes. 
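(The reextraction flow described below works in three steps: the set is rebuilt every frame, a failed material-binding lookup records the mesh and skips it, and the next frame's extraction retries everything still recorded. A minimal runnable sketch follows; Entity, MaterialBinding, and try_collect are simplified stand-ins for the real ECS and material types.)

use std::collections::{HashMap, HashSet};

type Entity = u64;
type MaterialBinding = u32;

// Stand-in for the retry set rebuilt each frame.
#[derive(Default)]
struct MeshesToRetry(HashSet<Entity>);

// If the material binding isn't prepared yet, remember the entity so
// extraction retries it next frame, and skip it for now.
fn try_collect(
    entity: Entity,
    bindings: &HashMap<Entity, MaterialBinding>,
    retry: &mut MeshesToRetry,
) -> Option<MaterialBinding> {
    match bindings.get(&entity) {
        Some(binding) => Some(*binding),
        None => {
            retry.0.insert(entity);
            None
        }
    }
}

fn main() {
    let mut retry = MeshesToRetry::default();
    let mut bindings = HashMap::new();

    // Frame 1: the material isn't prepared yet, so the mesh is deferred.
    assert_eq!(try_collect(7, &bindings, &mut retry), None);
    assert!(retry.0.contains(&7));

    // Frame 2: the binding now exists, so the deferred mesh is collected.
    bindings.insert(7, 42);
    let deferred: Vec<Entity> = retry.0.drain().collect();
    for entity in deferred {
        assert_eq!(try_collect(entity, &bindings, &mut retry), Some(42));
    }
}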
+#[derive(Resource, Default, Deref, DerefMut)] +pub struct MeshesToReextractNextFrame(MainEntityHashSet); + impl RenderMeshInstanceShared { fn from_components( previous_transform: Option<&PreviousGlobalTransform>, mesh: &Mesh3d, + tag: Option<&MeshTag>, not_shadow_caster: bool, no_automatic_batching: bool, ) -> Self { @@ -828,6 +861,7 @@ impl RenderMeshInstanceShared { // This gets filled in later, during `RenderMeshGpuBuilder::update`. material_bindings_index: default(), lightmap_slab_index: None, + tag: tag.map_or(0, |i| **i), } } @@ -863,37 +897,6 @@ pub struct RenderMeshInstancesCpu(MainEntityHashMap); #[derive(Default, Deref, DerefMut)] pub struct RenderMeshInstancesGpu(MainEntityHashMap); -/// Maps each mesh instance to the material ID, and allocated binding ID, -/// associated with that mesh instance. -#[derive(Resource, Default)] -pub struct RenderMeshMaterialIds { - /// Maps the mesh instance to the material ID. - mesh_to_material: MainEntityHashMap, -} - -impl RenderMeshMaterialIds { - /// Returns the mesh material ID for the entity with the given mesh, or a - /// dummy mesh material ID if the mesh has no material ID. - /// - /// Meshes almost always have materials, but in very specific circumstances - /// involving custom pipelines they won't. (See the - /// `specialized_mesh_pipelines` example.) - pub(crate) fn mesh_material(&self, entity: MainEntity) -> UntypedAssetId { - self.mesh_to_material - .get(&entity) - .cloned() - .unwrap_or(AssetId::::invalid().into()) - } - - pub(crate) fn insert(&mut self, mesh_entity: MainEntity, material_id: UntypedAssetId) { - self.mesh_to_material.insert(mesh_entity, material_id); - } - - pub(crate) fn remove(&mut self, main_entity: MainEntity) { - self.mesh_to_material.remove(&main_entity); - } -} - impl RenderMeshInstances { /// Creates a new [`RenderMeshInstances`] instance. fn new(use_gpu_instance_buffer_builder: bool) -> RenderMeshInstances { @@ -905,7 +908,7 @@ impl RenderMeshInstances { } /// Returns the ID of the mesh asset attached to the given entity, if any. 
- pub(crate) fn mesh_asset_id(&self, entity: MainEntity) -> Option> { + pub fn mesh_asset_id(&self, entity: MainEntity) -> Option> { match *self { RenderMeshInstances::CpuBuilding(ref instances) => instances.mesh_asset_id(entity), RenderMeshInstances::GpuBuilding(ref instances) => instances.mesh_asset_id(entity), @@ -950,6 +953,7 @@ impl RenderMeshInstancesCpu { .map(|render_mesh_instance| RenderMeshQueueData { shared: &render_mesh_instance.shared, translation: render_mesh_instance.transforms.world_from_local.translation, + current_uniform_index: InputUniformIndex::default(), }) } @@ -973,6 +977,9 @@ impl RenderMeshInstancesGpu { .map(|render_mesh_instance| RenderMeshQueueData { shared: &render_mesh_instance.shared, translation: render_mesh_instance.translation, + current_uniform_index: InputUniformIndex( + render_mesh_instance.current_uniform_index.into(), + ), }) } @@ -1092,11 +1099,13 @@ impl RenderMeshInstanceGpuBuilder { current_input_buffer: &mut InstanceInputUniformBuffer, previous_input_buffer: &mut InstanceInputUniformBuffer, mesh_allocator: &MeshAllocator, - mesh_material_ids: &RenderMeshMaterialIds, + mesh_material_ids: &RenderMaterialInstances, render_material_bindings: &RenderMaterialBindings, render_lightmaps: &RenderLightmaps, - skin_indices: &SkinIndices, - ) -> u32 { + skin_uniforms: &SkinUniforms, + timestamp: FrameCount, + meshes_to_reextract_next_frame: &mut MeshesToReextractNextFrame, + ) -> Option { let (first_vertex_index, vertex_count) = match mesh_allocator.mesh_vertex_slice(&self.shared.mesh_asset_id) { Some(mesh_vertex_slice) => ( @@ -1114,22 +1123,28 @@ impl RenderMeshInstanceGpuBuilder { ), None => (false, 0, 0), }; - - let current_skin_index = match skin_indices.current.get(&entity) { - Some(skin_indices) => skin_indices.index(), - None => u32::MAX, - }; - let previous_skin_index = match skin_indices.prev.get(&entity) { - Some(skin_indices) => skin_indices.index(), + let current_skin_index = match skin_uniforms.skin_byte_offset(entity) { + Some(skin_index) => skin_index.index(), None => u32::MAX, }; - // Look up the material index. + // Look up the material index. If we couldn't fetch the material index, + // then the material hasn't been prepared yet, perhaps because it hasn't + // yet loaded. In that case, add the mesh to + // `meshes_to_reextract_next_frame` and bail. let mesh_material = mesh_material_ids.mesh_material(entity); - let mesh_material_binding_id = render_material_bindings - .get(&mesh_material) - .cloned() - .unwrap_or_default(); + let mesh_material_binding_id = if mesh_material != DUMMY_MESH_MATERIAL.untyped() { + match render_material_bindings.get(&mesh_material) { + Some(binding_id) => *binding_id, + None => { + meshes_to_reextract_next_frame.insert(entity); + return None; + } + } + } else { + // Use a dummy material binding ID. 
+ MaterialBindingId::default() + }; self.shared.material_bindings_index = mesh_material_binding_id; let lightmap_slot = match render_lightmaps.render_lightmaps.get(&entity) { @@ -1148,6 +1163,7 @@ impl RenderMeshInstanceGpuBuilder { lightmap_uv_rect: self.lightmap_uv_rect, flags: self.mesh_flags.bits(), previous_input_index: u32::MAX, + timestamp: timestamp.0, first_vertex_index, first_index_index, index_count: if mesh_is_indexed { @@ -1156,12 +1172,11 @@ impl RenderMeshInstanceGpuBuilder { vertex_count }, current_skin_index, - previous_skin_index, material_and_lightmap_bind_group_slot: u32::from( self.shared.material_bindings_index.slot, ) | ((lightmap_slot as u32) << 16), - pad_a: 0, - pad_b: 0, + tag: self.shared.tag, + pad: 0, }; // Did the last frame contain this entity as well? @@ -1206,7 +1221,7 @@ impl RenderMeshInstanceGpuBuilder { } } - current_uniform_index + Some(current_uniform_index) } } @@ -1273,6 +1288,9 @@ pub struct RenderMeshQueueData<'a> { pub shared: &'a RenderMeshInstanceShared, /// The translation of the mesh instance. pub translation: Vec3, + /// The index of the [`MeshInputUniform`] in the GPU buffer for this mesh + /// instance. + pub current_uniform_index: InputUniformIndex, } /// A [`SystemSet`] that encompasses both [`extract_meshes_for_cpu_building`] @@ -1296,6 +1314,7 @@ pub fn extract_meshes_for_cpu_building( &GlobalTransform, Option<&PreviousGlobalTransform>, &Mesh3d, + Option<&MeshTag>, Has, Has, Has, @@ -1314,6 +1333,7 @@ pub fn extract_meshes_for_cpu_building( transform, previous_transform, mesh, + tag, no_frustum_culling, not_shadow_receiver, transmitted_receiver, @@ -1341,6 +1361,7 @@ pub fn extract_meshes_for_cpu_building( let shared = RenderMeshInstanceShared::from_components( previous_transform, mesh, + tag, not_shadow_caster, no_automatic_batching, ); @@ -1380,6 +1401,24 @@ pub fn extract_meshes_for_cpu_building( } } +/// All the data that we need from a mesh in the main world. +type GpuMeshExtractionQuery = ( + Entity, + Read, + Read, + Option>, + Option>, + Option>, + Read, + Option>, + Has, + Has, + Has, + Has, + Has, + Has, +); + /// Extracts meshes from the main world into the render world and queues /// [`MeshInputUniform`]s to be uploaded to the GPU. /// @@ -1394,21 +1433,7 @@ pub fn extract_meshes_for_gpu_building( mut render_mesh_instance_queues: ResMut, changed_meshes_query: Extract< Query< - ( - Entity, - &ViewVisibility, - &GlobalTransform, - Option<&PreviousGlobalTransform>, - Option<&Lightmap>, - Option<&Aabb>, - &Mesh3d, - Has, - Has, - Has, - Has, - Has, - Has, - ), + GpuMeshExtractionQuery, Or<( Changed, Changed, @@ -1422,13 +1447,16 @@ pub fn extract_meshes_for_gpu_building( Changed, Changed, Changed, + Changed, )>, >, >, + all_meshes_query: Extract>, mut removed_visibilities_query: Extract>, mut removed_global_transforms_query: Extract>, mut removed_meshes_query: Extract>, gpu_culling_query: Extract, Without)>>, + meshes_to_reextract_next_frame: ResMut, ) { let any_gpu_culling = !gpu_culling_query.is_empty(); @@ -1450,80 +1478,37 @@ pub fn extract_meshes_for_gpu_building( // construct the `MeshInputUniform` for them. 
changed_meshes_query.par_iter().for_each_init( || render_mesh_instance_queues.borrow_local_mut(), - |queue, - ( - entity, - view_visibility, - transform, - previous_transform, - lightmap, - aabb, - mesh, - no_frustum_culling, - not_shadow_receiver, - transmitted_receiver, - not_shadow_caster, - no_automatic_batching, - visibility_range, - )| { - if !view_visibility.get() { - queue.remove(entity.into(), any_gpu_culling); - return; - } - - let mut lod_index = None; - if visibility_range { - lod_index = render_visibility_ranges.lod_index_for_entity(entity.into()); - } - - let mesh_flags = MeshFlags::from_components( - transform, - lod_index, - no_frustum_culling, - not_shadow_receiver, - transmitted_receiver, - ); - - let shared = RenderMeshInstanceShared::from_components( - previous_transform, - mesh, - not_shadow_caster, - no_automatic_batching, - ); - - let lightmap_uv_rect = pack_lightmap_uv_rect(lightmap.map(|lightmap| lightmap.uv_rect)); - - let gpu_mesh_culling_data = any_gpu_culling.then(|| MeshCullingData::new(aabb)); - - let previous_input_index = if shared - .flags - .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM) - { - render_mesh_instances - .get(&MainEntity::from(entity)) - .map(|render_mesh_instance| render_mesh_instance.current_uniform_index) - } else { - None - }; - - let gpu_mesh_instance_builder = RenderMeshInstanceGpuBuilder { - shared, - world_from_local: (&transform.affine()).into(), - lightmap_uv_rect, - mesh_flags, - previous_input_index, - }; - - queue.push( - entity.into(), - gpu_mesh_instance_builder, - gpu_mesh_culling_data, + |queue, query_row| { + extract_mesh_for_gpu_building( + query_row, + &render_visibility_ranges, + render_mesh_instances, + queue, + any_gpu_culling, ); }, ); - // Also record info about each mesh that became invisible. + // Process materials that `collect_meshes_for_gpu_building` marked as + // needing to be reextracted. This will happen when we extracted a mesh on + // some previous frame, but its material hadn't been prepared yet, perhaps + // because the material hadn't yet been loaded. We reextract such materials + // on subsequent frames so that `collect_meshes_for_gpu_building` will check + // to see if their materials have been prepared. let mut queue = render_mesh_instance_queues.borrow_local_mut(); + for &mesh_entity in &**meshes_to_reextract_next_frame { + if let Ok(query_row) = all_meshes_query.get(*mesh_entity) { + extract_mesh_for_gpu_building( + query_row, + &render_visibility_ranges, + render_mesh_instances, + &mut queue, + any_gpu_culling, + ); + } + } + + // Also record info about each mesh that became invisible. for entity in removed_visibilities_query .read() .chain(removed_global_transforms_query.read()) @@ -1532,12 +1517,93 @@ pub fn extract_meshes_for_gpu_building( // Only queue a mesh for removal if we didn't pick it up above. // It's possible that a necessary component was removed and re-added in // the same frame. 
- if !changed_meshes_query.contains(entity) { - queue.remove(entity.into(), any_gpu_culling); + let entity = MainEntity::from(entity); + if !changed_meshes_query.contains(*entity) + && !meshes_to_reextract_next_frame.contains(&entity) + { + queue.remove(entity, any_gpu_culling); } } } +fn extract_mesh_for_gpu_building( + ( + entity, + view_visibility, + transform, + previous_transform, + lightmap, + aabb, + mesh, + tag, + no_frustum_culling, + not_shadow_receiver, + transmitted_receiver, + not_shadow_caster, + no_automatic_batching, + visibility_range, + ): ::Item<'_>, + render_visibility_ranges: &RenderVisibilityRanges, + render_mesh_instances: &RenderMeshInstancesGpu, + queue: &mut RenderMeshInstanceGpuQueue, + any_gpu_culling: bool, +) { + if !view_visibility.get() { + queue.remove(entity.into(), any_gpu_culling); + return; + } + + let mut lod_index = None; + if visibility_range { + lod_index = render_visibility_ranges.lod_index_for_entity(entity.into()); + } + + let mesh_flags = MeshFlags::from_components( + transform, + lod_index, + no_frustum_culling, + not_shadow_receiver, + transmitted_receiver, + ); + + let shared = RenderMeshInstanceShared::from_components( + previous_transform, + mesh, + tag, + not_shadow_caster, + no_automatic_batching, + ); + + let lightmap_uv_rect = pack_lightmap_uv_rect(lightmap.map(|lightmap| lightmap.uv_rect)); + + let gpu_mesh_culling_data = any_gpu_culling.then(|| MeshCullingData::new(aabb)); + + let previous_input_index = if shared + .flags + .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM) + { + render_mesh_instances + .get(&MainEntity::from(entity)) + .map(|render_mesh_instance| render_mesh_instance.current_uniform_index) + } else { + None + }; + + let gpu_mesh_instance_builder = RenderMeshInstanceGpuBuilder { + shared, + world_from_local: (&transform.affine()).into(), + lightmap_uv_rect, + mesh_flags, + previous_input_index, + }; + + queue.push( + entity.into(), + gpu_mesh_instance_builder, + gpu_mesh_culling_data, + ); +} + /// A system that sets the [`RenderMeshInstanceFlags`] for each mesh based on /// whether the previous frame had skins and/or morph targets. /// @@ -1552,12 +1618,12 @@ pub fn extract_meshes_for_gpu_building( /// [`crate::material::queue_material_meshes`] check the skin and morph target /// tables for each mesh, but that would be too slow in the hot mesh queuing /// loop. -fn set_mesh_motion_vector_flags( +pub(crate) fn set_mesh_motion_vector_flags( mut render_mesh_instances: ResMut, - skin_indices: Res, + skin_uniforms: Res, morph_indices: Res, ) { - for &entity in skin_indices.prev.keys() { + for &entity in skin_uniforms.all_skins() { render_mesh_instances .insert_mesh_instance_flags(entity, RenderMeshInstanceFlags::HAS_PREVIOUS_SKIN); } @@ -1577,21 +1643,26 @@ pub fn collect_meshes_for_gpu_building( mut mesh_culling_data_buffer: ResMut, mut render_mesh_instance_queues: ResMut, mesh_allocator: Res, - mesh_material_ids: Res, + mesh_material_ids: Res, render_material_bindings: Res, render_lightmaps: Res, - skin_indices: Res, + skin_uniforms: Res, + frame_count: Res, + mut meshes_to_reextract_next_frame: ResMut, ) { - let RenderMeshInstances::GpuBuilding(ref mut render_mesh_instances) = + let RenderMeshInstances::GpuBuilding(render_mesh_instances) = render_mesh_instances.into_inner() else { return; }; + // We're going to rebuild `meshes_to_reextract_next_frame`. + meshes_to_reextract_next_frame.clear(); + // Collect render mesh instances. Build up the uniform buffer. 
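    // These instance buffers are double-buffered: `current_input_buffer` receives
    // this frame's `MeshInputUniform`s, while `previous_input_buffer` keeps last
    // frame's so the preprocessing shader can fetch prior transforms for motion
    // vectors.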
let gpu_preprocessing::BatchedInstanceBuffers { - ref mut current_input_buffer, - ref mut previous_input_buffer, + current_input_buffer, + previous_input_buffer, .. } = batched_instance_buffers.into_inner(); @@ -1619,7 +1690,9 @@ pub fn collect_meshes_for_gpu_building( &mesh_material_ids, &render_material_bindings, &render_lightmaps, - &skin_indices, + &skin_uniforms, + *frame_count, + &mut meshes_to_reextract_next_frame, ); } @@ -1637,7 +1710,7 @@ pub fn collect_meshes_for_gpu_building( ref mut removed, } => { for (entity, mesh_instance_builder, mesh_culling_builder) in changed.drain(..) { - let instance_data_index = mesh_instance_builder.update( + let Some(instance_data_index) = mesh_instance_builder.update( entity, &mut *render_mesh_instances, current_input_buffer, @@ -1646,8 +1719,12 @@ pub fn collect_meshes_for_gpu_building( &mesh_material_ids, &render_material_bindings, &render_lightmaps, - &skin_indices, - ); + &skin_uniforms, + *frame_count, + &mut meshes_to_reextract_next_frame, + ) else { + continue; + }; mesh_culling_builder .update(&mut mesh_culling_data_buffer, instance_data_index as usize); } @@ -1732,7 +1809,7 @@ impl FromWorld for MeshPipeline { let format_size = image.texture_descriptor.format.pixel_size(); render_queue.write_texture( texture.as_image_copy(), - &image.data, + image.data.as_ref().expect("Image was created without data"), TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(image.width() * format_size as u32), @@ -1763,7 +1840,7 @@ impl FromWorld for MeshPipeline { &render_device, &render_adapter, ), - skins_use_uniform_buffers: skin::skins_use_uniform_buffers(&render_device), + skins_use_uniform_buffers: skins_use_uniform_buffers(&render_device), } } } @@ -1796,20 +1873,22 @@ impl GetBatchData for MeshPipeline { SRes, SRes>, SRes, - SRes, + SRes, ); // The material bind group ID, the mesh ID, and the lightmap ID, // respectively. 
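    // Consecutive phase items whose `CompareData` values are equal may be merged
    // into a single batch.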
type CompareData = ( MaterialBindGroupIndex, AssetId, - Option>, + Option, ); type BufferData = MeshUniform; fn get_batch_data( - (mesh_instances, lightmaps, _, mesh_allocator, skin_indices): &SystemParamItem, + (mesh_instances, lightmaps, _, mesh_allocator, skin_uniforms): &SystemParamItem< + Self::Param, + >, (_entity, main_entity): (Entity, MainEntity), ) -> Option<(Self::BufferData, Option)> { let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else { @@ -1827,9 +1906,7 @@ impl GetBatchData for MeshPipeline { }; let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity); - let current_skin_index = skin_indices.current.get(&main_entity).map(SkinIndex::index); - let previous_skin_index = skin_indices.prev.get(&main_entity).map(SkinIndex::index); - + let current_skin_index = skin_uniforms.skin_index(main_entity); let material_bind_group_index = mesh_instance.material_bindings_index; Some(( @@ -1839,12 +1916,12 @@ impl GetBatchData for MeshPipeline { material_bind_group_index.slot, maybe_lightmap.map(|lightmap| (lightmap.slot_index, lightmap.uv_rect)), current_skin_index, - previous_skin_index, + Some(mesh_instance.tag), ), mesh_instance.should_batch().then_some(( material_bind_group_index.group, mesh_instance.mesh_asset_id, - maybe_lightmap.map(|lightmap| lightmap.image), + maybe_lightmap.map(|lightmap| lightmap.slab_index), )), )) } @@ -1874,13 +1951,15 @@ impl GetFullBatchData for MeshPipeline { mesh_instance.should_batch().then_some(( mesh_instance.material_bindings_index.group, mesh_instance.mesh_asset_id, - maybe_lightmap.map(|lightmap| lightmap.image), + maybe_lightmap.map(|lightmap| lightmap.slab_index), )), )) } fn get_binned_batch_data( - (mesh_instances, lightmaps, _, mesh_allocator, skin_indices): &SystemParamItem, + (mesh_instances, lightmaps, _, mesh_allocator, skin_uniforms): &SystemParamItem< + Self::Param, + >, main_entity: MainEntity, ) -> Option { let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else { @@ -1897,8 +1976,7 @@ impl GetFullBatchData for MeshPipeline { }; let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity); - let current_skin_index = skin_indices.current.get(&main_entity).map(SkinIndex::index); - let previous_skin_index = skin_indices.prev.get(&main_entity).map(SkinIndex::index); + let current_skin_index = skin_uniforms.skin_index(main_entity); Some(MeshUniform::new( &mesh_instance.transforms, @@ -1906,7 +1984,7 @@ impl GetFullBatchData for MeshPipeline { mesh_instance.material_bindings_index.slot, maybe_lightmap.map(|lightmap| (lightmap.slot_index, lightmap.uv_rect)), current_skin_index, - previous_skin_index, + Some(mesh_instance.tag), )) } @@ -1929,29 +2007,28 @@ impl GetFullBatchData for MeshPipeline { } fn write_batch_indirect_parameters_metadata( - mesh_index: u32, indexed: bool, base_output_index: u32, batch_set_index: Option, - indirect_parameters_buffer: &mut IndirectParametersBuffers, + phase_indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers, indirect_parameters_offset: u32, ) { - let indirect_parameters = IndirectParametersMetadata { - mesh_index, + let indirect_parameters = IndirectParametersCpuMetadata { base_output_index, batch_set_index: match batch_set_index { Some(batch_set_index) => u32::from(batch_set_index), None => !0, }, - early_instance_count: 0, - late_instance_count: 0, }; if indexed { - indirect_parameters_buffer.set_indexed(indirect_parameters_offset, indirect_parameters); + phase_indirect_parameters_buffers + .indexed + 
.set(indirect_parameters_offset, indirect_parameters); } else { - indirect_parameters_buffer - .set_non_indexed(indirect_parameters_offset, indirect_parameters); + phase_indirect_parameters_buffers + .non_indexed + .set(indirect_parameters_offset, indirect_parameters); } } } @@ -2544,9 +2621,12 @@ impl SpecializedMeshPipeline for MeshPipeline { } } -/// Bind groups for meshes currently loaded. -#[derive(Resource, Default)] -pub struct MeshBindGroups { +/// The bind groups for meshes currently loaded. +/// +/// If GPU mesh preprocessing isn't in use, these are global to the scene. If +/// GPU mesh preprocessing is in use, these are specific to a single phase. +#[derive(Default)] +pub struct MeshPhaseBindGroups { model_only: Option, skinned: Option, morph_targets: HashMap, MeshBindGroupPair>, @@ -2558,7 +2638,18 @@ pub struct MeshBindGroupPair { no_motion_vectors: BindGroup, } -impl MeshBindGroups { +/// All bind groups for meshes currently loaded. +#[derive(Resource)] +pub enum MeshBindGroups { + /// The bind groups for the meshes for the entire scene, if GPU mesh + /// preprocessing isn't in use. + CpuPreprocessing(MeshPhaseBindGroups), + /// A mapping from the type ID of a phase (e.g. [`Opaque3d`]) to the mesh + /// bind groups for that phase. + GpuPreprocessing(TypeIdMap), +} + +impl MeshPhaseBindGroups { pub fn reset(&mut self) { self.model_only = None; self.skinned = None; @@ -2600,9 +2691,10 @@ impl MeshBindGroupPair { } } -pub fn prepare_mesh_bind_group( +/// Creates the per-mesh bind groups for each type of mesh and each phase. +pub fn prepare_mesh_bind_groups( + mut commands: Commands, meshes: Res>, - mut groups: ResMut, mesh_pipeline: Res, render_device: Res, cpu_batched_instance_buffer: Option< @@ -2615,36 +2707,88 @@ pub fn prepare_mesh_bind_group( weights_uniform: Res, mut render_lightmaps: ResMut, ) { - groups.reset(); + // CPU mesh preprocessing path. + if let Some(cpu_batched_instance_buffer) = cpu_batched_instance_buffer { + if let Some(instance_data_binding) = cpu_batched_instance_buffer + .into_inner() + .instance_data_binding() + { + // In this path, we only have a single set of bind groups for all phases. + let cpu_preprocessing_mesh_bind_groups = prepare_mesh_bind_groups_for_phase( + instance_data_binding, + &meshes, + &mesh_pipeline, + &render_device, + &skins_uniform, + &weights_uniform, + &mut render_lightmaps, + ); + commands.insert_resource(MeshBindGroups::CpuPreprocessing( + cpu_preprocessing_mesh_bind_groups, + )); + return; + } + } + + // GPU mesh preprocessing path. + if let Some(gpu_batched_instance_buffers) = gpu_batched_instance_buffers { + let mut gpu_preprocessing_mesh_bind_groups = TypeIdMap::default(); + + // Loop over each phase. + for (phase_type_id, batched_phase_instance_buffers) in + &gpu_batched_instance_buffers.phase_instance_buffers + { + let Some(instance_data_binding) = + batched_phase_instance_buffers.instance_data_binding() + else { + continue; + }; + + let mesh_phase_bind_groups = prepare_mesh_bind_groups_for_phase( + instance_data_binding, + &meshes, + &mesh_pipeline, + &render_device, + &skins_uniform, + &weights_uniform, + &mut render_lightmaps, + ); + + gpu_preprocessing_mesh_bind_groups.insert(*phase_type_id, mesh_phase_bind_groups); + } + + commands.insert_resource(MeshBindGroups::GpuPreprocessing( + gpu_preprocessing_mesh_bind_groups, + )); + } +} + +/// Creates the per-mesh bind groups for each type of mesh, for a single phase. 
+fn prepare_mesh_bind_groups_for_phase( + model: BindingResource, + meshes: &RenderAssets, + mesh_pipeline: &MeshPipeline, + render_device: &RenderDevice, + skins_uniform: &SkinUniforms, + weights_uniform: &MorphUniforms, + render_lightmaps: &mut RenderLightmaps, +) -> MeshPhaseBindGroups { let layouts = &mesh_pipeline.mesh_layouts; - let model = if let Some(cpu_batched_instance_buffer) = cpu_batched_instance_buffer { - cpu_batched_instance_buffer - .into_inner() - .instance_data_binding() - } else if let Some(gpu_batched_instance_buffers) = gpu_batched_instance_buffers { - gpu_batched_instance_buffers - .into_inner() - .instance_data_binding() - } else { - return; + // TODO: Reuse allocations. + let mut groups = MeshPhaseBindGroups { + model_only: Some(layouts.model_only(render_device, &model)), + ..default() }; - let Some(model) = model else { return }; - - groups.model_only = Some(layouts.model_only(&render_device, &model)); // Create the skinned mesh bind group with the current and previous buffers - // (the latter being for motion vector computation). If there's no previous - // buffer, just use the current one as the shader will ignore it. - let skin = skins_uniform.current_buffer.buffer(); - if let Some(skin) = skin { - let prev_skin = skins_uniform.prev_buffer.buffer().unwrap_or(skin); - groups.skinned = Some(MeshBindGroupPair { - motion_vectors: layouts.skinned_motion(&render_device, &model, skin, prev_skin), - no_motion_vectors: layouts.skinned(&render_device, &model, skin), - }); - } + // (the latter being for motion vector computation). + let (skin, prev_skin) = (&skins_uniform.current_buffer, &skins_uniform.prev_buffer); + groups.skinned = Some(MeshBindGroupPair { + motion_vectors: layouts.skinned_motion(render_device, &model, skin, prev_skin), + no_motion_vectors: layouts.skinned(render_device, &model, skin), + }); // Create the morphed bind groups just like we did for the skinned bind // group. 
@@ -2652,43 +2796,37 @@ pub fn prepare_mesh_bind_group( let prev_weights = weights_uniform.prev_buffer.buffer().unwrap_or(weights); for (id, gpu_mesh) in meshes.iter() { if let Some(targets) = gpu_mesh.morph_targets.as_ref() { - let bind_group_pair = match skin.filter(|_| is_skinned(&gpu_mesh.layout)) { - Some(skin) => { - let prev_skin = skins_uniform.prev_buffer.buffer().unwrap_or(skin); - MeshBindGroupPair { - motion_vectors: layouts.morphed_skinned_motion( - &render_device, - &model, - skin, - weights, - targets, - prev_skin, - prev_weights, - ), - no_motion_vectors: layouts.morphed_skinned( - &render_device, - &model, - skin, - weights, - targets, - ), - } + let bind_group_pair = if is_skinned(&gpu_mesh.layout) { + let prev_skin = &skins_uniform.prev_buffer; + MeshBindGroupPair { + motion_vectors: layouts.morphed_skinned_motion( + render_device, + &model, + skin, + weights, + targets, + prev_skin, + prev_weights, + ), + no_motion_vectors: layouts.morphed_skinned( + render_device, + &model, + skin, + weights, + targets, + ), } - None => MeshBindGroupPair { + } else { + MeshBindGroupPair { motion_vectors: layouts.morphed_motion( - &render_device, + render_device, &model, weights, targets, prev_weights, ), - no_motion_vectors: layouts.morphed( - &render_device, - &model, - weights, - targets, - ), - }, + no_motion_vectors: layouts.morphed(render_device, &model, weights, targets), + } }; groups.morph_targets.insert(id, bind_group_pair); } @@ -2700,9 +2838,11 @@ pub fn prepare_mesh_bind_group( for (lightmap_slab_id, lightmap_slab) in render_lightmaps.slabs.iter_mut().enumerate() { groups.lightmaps.insert( LightmapSlabIndex(NonMaxU32::new(lightmap_slab_id as u32).unwrap()), - layouts.lightmapped(&render_device, &model, lightmap_slab, bindless_supported), + layouts.lightmapped(render_device, &model, lightmap_slab, bindless_supported), ); } + + groups } pub struct SetMeshViewBindGroup; @@ -2760,7 +2900,7 @@ impl RenderCommand
<P> for SetMeshBindGroup<I> { SRes<RenderDevice>, SRes<MeshBindGroups>, SRes<RenderMeshInstances>, - SRes<SkinIndices>, + SRes<SkinUniforms>, SRes<MorphIndices>, SRes<RenderLightmaps>, ); @@ -2776,7 +2916,7 @@ impl RenderCommand
<P> for SetMeshBindGroup<I> { render_device, bind_groups, mesh_instances, - skin_indices, + skin_uniforms, morph_indices, lightmaps, ): SystemParamItem<'w, '_, Self::Param>, @@ -2784,7 +2924,7 @@ impl RenderCommand
<P> for SetMeshBindGroup<I> { ) -> RenderCommandResult { let bind_groups = bind_groups.into_inner(); let mesh_instances = mesh_instances.into_inner(); - let skin_indices = skin_indices.into_inner(); + let skin_uniforms = skin_uniforms.into_inner(); let morph_indices = morph_indices.into_inner(); let entity = &item.main_entity(); @@ -2793,12 +2933,11 @@ impl RenderCommand
<P> for SetMeshBindGroup<I> { return RenderCommandResult::Success; }; - let current_skin_index = skin_indices.current.get(entity); - let prev_skin_index = skin_indices.prev.get(entity); + let current_skin_byte_offset = skin_uniforms.skin_byte_offset(*entity); let current_morph_index = morph_indices.current.get(entity); let prev_morph_index = morph_indices.prev.get(entity); - let is_skinned = current_skin_index.is_some(); + let is_skinned = current_skin_byte_offset.is_some(); let is_morphed = current_morph_index.is_some(); let lightmap_slab_index = lightmaps @@ -2806,7 +2945,20 @@ impl RenderCommand
<P> for SetMeshBindGroup<I> { .get(entity) .map(|render_lightmap| render_lightmap.slab_index); - let Some(bind_group) = bind_groups.get( + let Some(mesh_phase_bind_groups) = (match *bind_groups { + MeshBindGroups::CpuPreprocessing(ref mesh_phase_bind_groups) => { + Some(mesh_phase_bind_groups) + } + MeshBindGroups::GpuPreprocessing(ref mesh_phase_bind_groups) => { + mesh_phase_bind_groups.get(&TypeId::of::
<P>
()) + } + }) else { + // This is harmless if e.g. we're rendering the `Shadow` phase and + // there weren't any shadows. + return RenderCommandResult::Success; + }; + + let Some(bind_group) = mesh_phase_bind_groups.get( mesh_asset_id, lightmap_slab_index, is_skinned, @@ -2826,8 +2978,8 @@ impl RenderCommand
<P> for SetMeshBindGroup<I> { dynamic_offsets[offset_count] = dynamic_offset; offset_count += 1; } - if let Some(current_skin_index) = current_skin_index { - if skin::skins_use_uniform_buffers(&render_device) { + if let Some(current_skin_index) = current_skin_byte_offset { + if skins_use_uniform_buffers(&render_device) { dynamic_offsets[offset_count] = current_skin_index.byte_offset; offset_count += 1; } @@ -2839,16 +2991,12 @@ impl RenderCommand
<P> for SetMeshBindGroup<I> { // Attach motion vectors if needed. if has_motion_vector_prepass { - // Attach the previous skin index for motion vector computation. If - // there isn't one, just use zero as the shader will ignore it. - if current_skin_index.is_some() && skin::skins_use_uniform_buffers(&render_device) { - match prev_skin_index { - Some(prev_skin_index) => { - dynamic_offsets[offset_count] = prev_skin_index.byte_offset; - } - None => dynamic_offsets[offset_count] = 0, + // Attach the previous skin index for motion vector computation. + if skins_use_uniform_buffers(&render_device) { + if let Some(current_skin_byte_offset) = current_skin_byte_offset { + dynamic_offsets[offset_count] = current_skin_byte_offset.byte_offset; + offset_count += 1; + } - offset_count += 1; } // Attach the previous morph index for motion vector computation. If @@ -2879,6 +3027,7 @@ impl RenderCommand
<P> for DrawMesh { SRes<PipelineCache>, SRes<MeshAllocator>, Option<SRes<PreprocessPipelines>>, + SRes<GpuPreprocessingSupport>, ); type ViewQuery = Has<PreprocessBindGroups>; type ItemQuery = (); @@ -2894,6 +3043,7 @@ impl RenderCommand
<P> for DrawMesh { pipeline_cache, mesh_allocator, preprocess_pipelines, + preprocessing_support, ): SystemParamItem<'w, '_, Self::Param>, pass: &mut TrackedRenderPass<'w>, ) -> RenderCommandResult { @@ -2902,7 +3052,8 @@ impl RenderCommand
<P> for DrawMesh { // it's compiled. Otherwise, our mesh instance data won't be present. if let Some(preprocess_pipelines) = preprocess_pipelines { if !has_preprocess_bind_group - || !preprocess_pipelines.pipelines_are_loaded(&pipeline_cache) + || !preprocess_pipelines + .pipelines_are_loaded(&pipeline_cache, &preprocessing_support) { return RenderCommandResult::Skip; } @@ -2958,9 +3109,20 @@ impl RenderCommand
<P> for DrawMesh { // Look up the indirect parameters buffer, as well as // the buffer we're going to use for // `multi_draw_indexed_indirect_count` (if available). + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffer.get(&TypeId::of::
<P>
()) + else { + warn!( + "Not rendering mesh because indexed indirect parameters buffer \ + wasn't present for this phase", + ); + return RenderCommandResult::Skip; + }; let (Some(indirect_parameters_buffer), Some(batch_sets_buffer)) = ( - indirect_parameters_buffer.indexed_data_buffer(), - indirect_parameters_buffer.indexed_batch_sets_buffer(), + phase_indirect_parameters_buffers.indexed.data_buffer(), + phase_indirect_parameters_buffers + .indexed + .batch_sets_buffer(), ) else { warn!( "Not rendering mesh because indexed indirect parameters buffer \ @@ -3015,9 +3177,20 @@ impl RenderCommand
<P> for DrawMesh { // Look up the indirect parameters buffer, as well as the // buffer we're going to use for // `multi_draw_indirect_count` (if available). + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffer.get(&TypeId::of::
<P>
()) + else { + warn!( + "Not rendering mesh because non-indexed indirect parameters buffer \ + wasn't present for this phase", + ); + return RenderCommandResult::Skip; + }; let (Some(indirect_parameters_buffer), Some(batch_sets_buffer)) = ( - indirect_parameters_buffer.non_indexed_data_buffer(), - indirect_parameters_buffer.non_indexed_batch_sets_buffer(), + phase_indirect_parameters_buffers.non_indexed.data_buffer(), + phase_indirect_parameters_buffers + .non_indexed + .batch_sets_buffer(), ) else { warn!( "Not rendering mesh because non-indexed indirect parameters buffer \ diff --git a/crates/bevy_pbr/src/render/mesh_functions.wgsl b/crates/bevy_pbr/src/render/mesh_functions.wgsl index 23857bc6aa..6d4c53a19f 100644 --- a/crates/bevy_pbr/src/render/mesh_functions.wgsl +++ b/crates/bevy_pbr/src/render/mesh_functions.wgsl @@ -22,6 +22,33 @@ fn get_previous_world_from_local(instance_index: u32) -> mat4x4 { return affine3_to_square(mesh[instance_index].previous_world_from_local); } +fn get_local_from_world(instance_index: u32) -> mat4x4 { + // the model matrix is translation * rotation * scale + // the inverse is then scale^-1 * rotation ^-1 * translation^-1 + // the 3x3 matrix only contains the information for the rotation and scale + let inverse_model_3x3 = transpose(mat2x4_f32_to_mat3x3_unpack( + mesh[instance_index].local_from_world_transpose_a, + mesh[instance_index].local_from_world_transpose_b, + )); + // construct scale^-1 * rotation^-1 from the 3x3 + let inverse_model_4x4_no_trans = mat4x4( + vec4(inverse_model_3x3[0], 0.0), + vec4(inverse_model_3x3[1], 0.0), + vec4(inverse_model_3x3[2], 0.0), + vec4(0.0,0.0,0.0,1.0) + ); + // we can get translation^-1 by negating the translation of the model + let model = get_world_from_local(instance_index); + let inverse_model_4x4_only_trans = mat4x4( + vec4(1.0,0.0,0.0,0.0), + vec4(0.0,1.0,0.0,0.0), + vec4(0.0,0.0,1.0,0.0), + vec4(-model[3].xyz, 1.0) + ); + + return inverse_model_4x4_no_trans * inverse_model_4x4_only_trans; +} + #endif // MESHLET_MESH_MATERIAL_PASS fn mesh_position_local_to_world(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { @@ -132,3 +159,10 @@ fn get_visibility_range_dither_level(instance_index: u32, world_position: vec4 u32 { + return mesh[instance_index].tag; +} +#endif diff --git a/crates/bevy_pbr/src/render/mesh_preprocess.wgsl b/crates/bevy_pbr/src/render/mesh_preprocess.wgsl index 315dd13d3f..543b328aaa 100644 --- a/crates/bevy_pbr/src/render/mesh_preprocess.wgsl +++ b/crates/bevy_pbr/src/render/mesh_preprocess.wgsl @@ -14,7 +14,9 @@ // are known as *early mesh preprocessing* and *late mesh preprocessing* // respectively. -#import bevy_pbr::mesh_preprocess_types::{IndirectParametersMetadata, MeshInput} +#import bevy_pbr::mesh_preprocess_types::{ + IndirectParametersCpuMetadata, IndirectParametersGpuMetadata, MeshInput +} #import bevy_pbr::mesh_types::{Mesh, MESH_FLAGS_NO_FRUSTUM_CULLING_BIT} #import bevy_pbr::mesh_view_bindings::view #import bevy_pbr::occlusion_culling @@ -43,11 +45,10 @@ struct PreprocessWorkItem { // The index of the `MeshInput` in the `current_input` buffer that we read // from. input_index: u32, - // The index of the `Mesh` in `output` that we write to. - output_index: u32, - // The index of the `IndirectParameters` in `indirect_parameters` that we - // write to. - indirect_parameters_index: u32, + // In direct mode, the index of the `Mesh` in `output` that we write to. In + // indirect mode, the index of the `IndirectParameters` in + // `indirect_parameters` that we write to. 
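    // Whether this index is a direct output index or an indirect-parameters
    // index is determined by the `INDIRECT` shader def, so a single u32 field
    // serves both modes.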
+ output_or_indirect_parameters_index: u32, } // The parameters for the indirect compute dispatch for the late mesh @@ -91,15 +92,18 @@ struct PushConstants { #ifdef INDIRECT // The array of indirect parameters for drawcalls. -@group(0) @binding(7) var indirect_parameters_metadata: - array; +@group(0) @binding(7) var indirect_parameters_cpu_metadata: + array; + +@group(0) @binding(8) var indirect_parameters_gpu_metadata: + array; #endif #ifdef FRUSTUM_CULLING // Data needed to cull the meshes. // // At the moment, this consists only of AABBs. -@group(0) @binding(8) var mesh_culling_data: array; +@group(0) @binding(9) var mesh_culling_data: array; #endif // FRUSTUM_CULLING #ifdef OCCLUSION_CULLING @@ -132,9 +136,9 @@ fn view_frustum_intersects_obb( let relative_radius = dot( abs( vec3( - dot(plane_normal, world_from_local[0]), - dot(plane_normal, world_from_local[1]), - dot(plane_normal, world_from_local[2]), + dot(plane_normal.xyz, world_from_local[0].xyz), + dot(plane_normal.xyz, world_from_local[1].xyz), + dot(plane_normal.xyz, world_from_local[2].xyz), ) ), aabb_half_extents @@ -171,8 +175,21 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { // Unpack the work item. let input_index = work_items[instance_index].input_index; - let output_index = work_items[instance_index].output_index; - let indirect_parameters_index = work_items[instance_index].indirect_parameters_index; +#ifdef INDIRECT + let indirect_parameters_index = work_items[instance_index].output_or_indirect_parameters_index; + + // If we're the first mesh instance in this batch, write the index of our + // `MeshInput` into the appropriate slot so that the indirect parameters + // building shader can access it. +#ifndef LATE_PHASE + if (instance_index == 0u || work_items[instance_index - 1].output_or_indirect_parameters_index != indirect_parameters_index) { + indirect_parameters_gpu_metadata[indirect_parameters_index].mesh_index = input_index; + } +#endif // LATE_PHASE + +#else // INDIRECT + let mesh_output_index = work_items[instance_index].output_or_indirect_parameters_index; +#endif // INDIRECT // Unpack the input matrix. let world_from_local_affine_transpose = current_input[input_index].world_from_local; @@ -192,14 +209,22 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { } #endif - // Look up the previous model matrix. + // See whether the `MeshInputUniform` was updated on this frame. If it + // wasn't, then we know the transforms of this mesh must be identical to + // those on the previous frame, and therefore we don't need to access the + // `previous_input_index` (in fact, we can't; that index are only valid for + // one frame and will be invalid). + let timestamp = current_input[input_index].timestamp; + let mesh_changed_this_frame = timestamp == view.frame_count; + + // Look up the previous model matrix, if it could have been. 
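    // If the uniform wasn't written this frame, the transform didn't change,
    // so the current matrix doubles as the previous one.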
let previous_input_index = current_input[input_index].previous_input_index; var previous_world_from_local_affine_transpose: mat3x4; - if (previous_input_index == 0xffffffff) { - previous_world_from_local_affine_transpose = world_from_local_affine_transpose; - } else { + if (mesh_changed_this_frame && previous_input_index != 0xffffffffu) { previous_world_from_local_affine_transpose = previous_input[previous_input_index].world_from_local; + } else { + previous_world_from_local_affine_transpose = world_from_local_affine_transpose; } let previous_world_from_local = maths::affine3_to_square(previous_world_from_local_affine_transpose); @@ -289,8 +314,7 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { // Enqueue a work item for the late prepass phase. late_preprocess_work_items[output_work_item_index].input_index = input_index; - late_preprocess_work_items[output_work_item_index].output_index = output_index; - late_preprocess_work_items[output_work_item_index].indirect_parameters_index = + late_preprocess_work_items[output_work_item_index].output_or_indirect_parameters_index = indirect_parameters_index; #endif // EARLY_PHASE // This mesh is culled. Skip it. @@ -314,22 +338,23 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { // parameters. Otherwise, this index was directly supplied to us. #ifdef INDIRECT #ifdef LATE_PHASE - let batch_output_index = - atomicLoad(&indirect_parameters_metadata[indirect_parameters_index].early_instance_count) + - atomicAdd(&indirect_parameters_metadata[indirect_parameters_index].late_instance_count, 1u); + let batch_output_index = atomicLoad( + &indirect_parameters_gpu_metadata[indirect_parameters_index].early_instance_count + ) + atomicAdd( + &indirect_parameters_gpu_metadata[indirect_parameters_index].late_instance_count, + 1u + ); #else // LATE_PHASE let batch_output_index = atomicAdd( - &indirect_parameters_metadata[indirect_parameters_index].early_instance_count, + &indirect_parameters_gpu_metadata[indirect_parameters_index].early_instance_count, 1u ); #endif // LATE_PHASE let mesh_output_index = - indirect_parameters_metadata[indirect_parameters_index].base_output_index + + indirect_parameters_cpu_metadata[indirect_parameters_index].base_output_index + batch_output_index; -#else // INDIRECT - let mesh_output_index = output_index; #endif // INDIRECT // Write the output. @@ -342,7 +367,7 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { output[mesh_output_index].lightmap_uv_rect = current_input[input_index].lightmap_uv_rect; output[mesh_output_index].first_vertex_index = current_input[input_index].first_vertex_index; output[mesh_output_index].current_skin_index = current_input[input_index].current_skin_index; - output[mesh_output_index].previous_skin_index = current_input[input_index].previous_skin_index; output[mesh_output_index].material_and_lightmap_bind_group_slot = current_input[input_index].material_and_lightmap_bind_group_slot; + output[mesh_output_index].tag = current_input[input_index].tag; } diff --git a/crates/bevy_pbr/src/render/mesh_types.wgsl b/crates/bevy_pbr/src/render/mesh_types.wgsl index f0258770c6..502b91b427 100644 --- a/crates/bevy_pbr/src/render/mesh_types.wgsl +++ b/crates/bevy_pbr/src/render/mesh_types.wgsl @@ -18,10 +18,12 @@ struct Mesh { // The index of the mesh's first vertex in the vertex buffer. first_vertex_index: u32, current_skin_index: u32, - previous_skin_index: u32, // Low 16 bits: index of the material inside the bind group data. 
// High 16 bits: index of the lightmap in the binding array. material_and_lightmap_bind_group_slot: u32, + // User supplied index to identify the mesh instance + tag: u32, + pad: u32, }; #ifdef SKINNED diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.rs b/crates/bevy_pbr/src/render/mesh_view_bindings.rs index bd759a1296..8e231886ba 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bindings.rs +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.rs @@ -1,7 +1,7 @@ use alloc::sync::Arc; use bevy_core_pipeline::{ core_3d::ViewTransmissionTexture, - oit::{OitBuffers, OrderIndependentTransparencySettings}, + oit::{resolve::is_oit_supported, OitBuffers, OrderIndependentTransparencySettings}, prepass::ViewPrepassTextures, tonemapping::{ get_lut_bind_group_layout_entries, get_lut_bindings, Tonemapping, TonemappingLuts, @@ -216,7 +216,7 @@ fn layout_entries( ( 2, #[cfg(all( - not(feature = "ios_simulator"), + not(target_abi = "sim"), any( not(feature = "webgl"), not(target_arch = "wasm32"), @@ -225,7 +225,7 @@ fn layout_entries( ))] texture_cube_array(TextureSampleType::Depth), #[cfg(any( - feature = "ios_simulator", + target_abi = "sim", all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")) ))] texture_cube(TextureSampleType::Depth), @@ -380,15 +380,10 @@ fn layout_entries( // OIT if layout_key.contains(MeshPipelineViewLayoutKey::OIT_ENABLED) { - // Check if the GPU supports writable storage buffers in the fragment shader - // If not, we can't use OIT, so we skip the OIT bindings. - // This is a hack to avoid errors on webgl -- the OIT plugin will warn the user that OIT - // is not supported on their platform, so we don't need to do it here. - if render_adapter - .get_downlevel_capabilities() - .flags - .contains(DownlevelFlags::FRAGMENT_WRITABLE_STORAGE) - { + // Check if we can use OIT. This is a hack to avoid errors on webgl -- + // the OIT plugin will warn the user that OIT is not supported on their + // platform, so we don't need to do it here. 
+ if is_oit_supported(render_adapter, render_device, false) { entries = entries.extend_with_indices(( // oit_layers (34, storage_buffer_sized(false, None)), diff --git a/crates/bevy_pbr/src/render/mod.rs b/crates/bevy_pbr/src/render/mod.rs index 8e26e869a1..6a29823022 100644 --- a/crates/bevy_pbr/src/render/mod.rs +++ b/crates/bevy_pbr/src/render/mod.rs @@ -13,4 +13,5 @@ pub use light::*; pub use mesh::*; pub use mesh_bindings::MeshLayouts; pub use mesh_view_bindings::*; -pub use skin::{extract_skins, prepare_skins, SkinIndices, SkinUniforms, MAX_JOINTS}; +pub use morph::*; +pub use skin::{extract_skins, prepare_skins, skins_use_uniform_buffers, SkinUniforms, MAX_JOINTS}; diff --git a/crates/bevy_pbr/src/render/morph.rs b/crates/bevy_pbr/src/render/morph.rs index 4b1ed68ce8..29070724dd 100644 --- a/crates/bevy_pbr/src/render/morph.rs +++ b/crates/bevy_pbr/src/render/morph.rs @@ -14,7 +14,7 @@ use bytemuck::NoUninit; #[derive(Component)] pub struct MorphIndex { - pub(super) index: u32, + pub index: u32, } /// Maps each mesh affected by morph targets to the applicable offset within the diff --git a/crates/bevy_pbr/src/render/parallax_mapping.wgsl b/crates/bevy_pbr/src/render/parallax_mapping.wgsl index 780b5c290a..9005734da5 100644 --- a/crates/bevy_pbr/src/render/parallax_mapping.wgsl +++ b/crates/bevy_pbr/src/render/parallax_mapping.wgsl @@ -1,10 +1,16 @@ #define_import_path bevy_pbr::parallax_mapping +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} + #import bevy_pbr::{ pbr_bindings::{depth_map_texture, depth_map_sampler}, mesh_bindings::mesh } +#ifdef BINDLESS +#import bevy_pbr::pbr_bindings::material_indices +#endif // BINDLESS + fn sample_depth_map(uv: vec2, material_bind_group_slot: u32) -> f32 { // We use `textureSampleLevel` over `textureSample` because the wgpu DX12 // backend (Fxc) panics when using "gradient instructions" inside a loop. 
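    // (`textureSample` computes implicit derivatives, which is the problem when
    // the call sits inside a loop whose iteration count varies, as in the
    // parallax-mapping search that calls this function.)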
@@ -18,8 +24,8 @@ fn sample_depth_map(uv: vec2, material_bind_group_slot: u32) -> f32 { // See https://stackoverflow.com/questions/56581141/direct3d11-gradient-instruction-used-in-a-loop-with-varying-iteration-forcing return textureSampleLevel( #ifdef BINDLESS - depth_map_texture[material_bind_group_slot], - depth_map_sampler[material_bind_group_slot], + bindless_textures_2d[material_indices[material_bind_group_slot].depth_map_texture], + bindless_samplers_filtering[material_indices[material_bind_group_slot].depth_map_sampler], #else // BINDLESS depth_map_texture, depth_map_sampler, diff --git a/crates/bevy_pbr/src/render/pbr_bindings.wgsl b/crates/bevy_pbr/src/render/pbr_bindings.wgsl index d6514acfa9..fac7b97265 100644 --- a/crates/bevy_pbr/src/render/pbr_bindings.wgsl +++ b/crates/bevy_pbr/src/render/pbr_bindings.wgsl @@ -3,20 +3,45 @@ #import bevy_pbr::pbr_types::StandardMaterial #ifdef BINDLESS -@group(2) @binding(0) var material: binding_array; -@group(2) @binding(1) var base_color_texture: binding_array, 16>; -@group(2) @binding(2) var base_color_sampler: binding_array; -@group(2) @binding(3) var emissive_texture: binding_array, 16>; -@group(2) @binding(4) var emissive_sampler: binding_array; -@group(2) @binding(5) var metallic_roughness_texture: binding_array, 16>; -@group(2) @binding(6) var metallic_roughness_sampler: binding_array; -@group(2) @binding(7) var occlusion_texture: binding_array, 16>; -@group(2) @binding(8) var occlusion_sampler: binding_array; -@group(2) @binding(9) var normal_map_texture: binding_array, 16>; -@group(2) @binding(10) var normal_map_sampler: binding_array; -@group(2) @binding(11) var depth_map_texture: binding_array, 16>; -@group(2) @binding(12) var depth_map_sampler: binding_array; +struct StandardMaterialBindings { + material: u32, // 0 + base_color_texture: u32, // 1 + base_color_sampler: u32, // 2 + emissive_texture: u32, // 3 + emissive_sampler: u32, // 4 + metallic_roughness_texture: u32, // 5 + metallic_roughness_sampler: u32, // 6 + occlusion_texture: u32, // 7 + occlusion_sampler: u32, // 8 + normal_map_texture: u32, // 9 + normal_map_sampler: u32, // 10 + depth_map_texture: u32, // 11 + depth_map_sampler: u32, // 12 + anisotropy_texture: u32, // 13 + anisotropy_sampler: u32, // 14 + specular_transmission_texture: u32, // 15 + specular_transmission_sampler: u32, // 16 + thickness_texture: u32, // 17 + thickness_sampler: u32, // 18 + diffuse_transmission_texture: u32, // 19 + diffuse_transmission_sampler: u32, // 20 + clearcoat_texture: u32, // 21 + clearcoat_sampler: u32, // 22 + clearcoat_roughness_texture: u32, // 23 + clearcoat_roughness_sampler: u32, // 24 + clearcoat_normal_texture: u32, // 25 + clearcoat_normal_sampler: u32, // 26 + specular_texture: u32, // 27 + specular_sampler: u32, // 28 + specular_tint_texture: u32, // 29 + specular_tint_sampler: u32, // 30 +} + +@group(2) @binding(0) var material_indices: array; +@group(2) @binding(10) var material_array: array; + #else // BINDLESS + @group(2) @binding(0) var material: StandardMaterial; @group(2) @binding(1) var base_color_texture: texture_2d; @group(2) @binding(2) var base_color_sampler: sampler; @@ -30,64 +55,35 @@ @group(2) @binding(10) var normal_map_sampler: sampler; @group(2) @binding(11) var depth_map_texture: texture_2d; @group(2) @binding(12) var depth_map_sampler: sampler; -#endif // BINDLESS #ifdef PBR_ANISOTROPY_TEXTURE_SUPPORTED -#ifdef BINDLESS -@group(2) @binding(13) var anisotropy_texture: binding_array, 16>; -@group(2) @binding(14) var anisotropy_sampler: 
binding_array; -#else // BINDLESS @group(2) @binding(13) var anisotropy_texture: texture_2d; @group(2) @binding(14) var anisotropy_sampler: sampler; -#endif // BINDLESS #endif // PBR_ANISOTROPY_TEXTURE_SUPPORTED #ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED -#ifdef BINDLESS -@group(2) @binding(15) var specular_transmission_texture: binding_array, 16>; -@group(2) @binding(16) var specular_transmission_sampler: binding_array; -@group(2) @binding(17) var thickness_texture: binding_array, 16>; -@group(2) @binding(18) var thickness_sampler: binding_array; -@group(2) @binding(19) var diffuse_transmission_texture: binding_array, 16>; -@group(2) @binding(20) var diffuse_transmission_sampler: binding_array; -#else // BINDLESS @group(2) @binding(15) var specular_transmission_texture: texture_2d; @group(2) @binding(16) var specular_transmission_sampler: sampler; @group(2) @binding(17) var thickness_texture: texture_2d; @group(2) @binding(18) var thickness_sampler: sampler; @group(2) @binding(19) var diffuse_transmission_texture: texture_2d; @group(2) @binding(20) var diffuse_transmission_sampler: sampler; -#endif // BINDLESS #endif // PBR_TRANSMISSION_TEXTURES_SUPPORTED #ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED -#ifdef BINDLESS -@group(2) @binding(21) var clearcoat_texture: binding_array, 16>; -@group(2) @binding(22) var clearcoat_sampler: binding_array; -@group(2) @binding(23) var clearcoat_roughness_texture: binding_array, 16>; -@group(2) @binding(24) var clearcoat_roughness_sampler: binding_array; -@group(2) @binding(25) var clearcoat_normal_texture: binding_array, 16>; -@group(2) @binding(26) var clearcoat_normal_sampler: binding_array; -#else // BINDLESS @group(2) @binding(21) var clearcoat_texture: texture_2d; @group(2) @binding(22) var clearcoat_sampler: sampler; @group(2) @binding(23) var clearcoat_roughness_texture: texture_2d; @group(2) @binding(24) var clearcoat_roughness_sampler: sampler; @group(2) @binding(25) var clearcoat_normal_texture: texture_2d; @group(2) @binding(26) var clearcoat_normal_sampler: sampler; -#endif // BINDLESS #endif // PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED #ifdef PBR_SPECULAR_TEXTURES_SUPPORTED -#ifdef BINDLESS -@group(2) @binding(27) var specular_texture: binding_array, 16>; -@group(2) @binding(28) var specular_sampler: binding_array; -@group(2) @binding(29) var specular_tint_texture: binding_array, 16>; -@group(2) @binding(30) var specular_tint_sampler: binding_array; -#else @group(2) @binding(27) var specular_texture: texture_2d; @group(2) @binding(28) var specular_sampler: sampler; @group(2) @binding(29) var specular_tint_texture: texture_2d; @group(2) @binding(30) var specular_tint_sampler: sampler; -#endif // BINDLESS #endif // PBR_SPECULAR_TEXTURES_SUPPORTED + +#endif // BINDLESS diff --git a/crates/bevy_pbr/src/render/pbr_fragment.wgsl b/crates/bevy_pbr/src/render/pbr_fragment.wgsl index ac68e9f0aa..779546f8bd 100644 --- a/crates/bevy_pbr/src/render/pbr_fragment.wgsl +++ b/crates/bevy_pbr/src/render/pbr_fragment.wgsl @@ -1,5 +1,7 @@ #define_import_path bevy_pbr::pbr_fragment +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} + #import bevy_pbr::{ pbr_functions, pbr_functions::SampleBias, @@ -26,6 +28,10 @@ #import bevy_pbr::forward_io::VertexOutput #endif +#ifdef BINDLESS +#import bevy_pbr::pbr_bindings::material_indices +#endif // BINDLESS + // prepare a basic PbrInput from the vertex stage output, mesh binding and view binding fn pbr_input_from_vertex_output( in: VertexOutput, @@ -70,17 +76,17 @@ fn 
pbr_input_from_standard_material( in: VertexOutput, is_front: bool, ) -> pbr_types::PbrInput { -#ifdef BINDLESS #ifdef MESHLET_MESH_MATERIAL_PASS let slot = in.material_bind_group_slot; #else // MESHLET_MESH_MATERIAL_PASS let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu; #endif // MESHLET_MESH_MATERIAL_PASS - let flags = pbr_bindings::material[slot].flags; - let base_color = pbr_bindings::material[slot].base_color; - let deferred_lighting_pass_id = pbr_bindings::material[slot].deferred_lighting_pass_id; +#ifdef BINDLESS + let flags = pbr_bindings::material_array[material_indices[slot].material].flags; + let base_color = pbr_bindings::material_array[material_indices[slot].material].base_color; + let deferred_lighting_pass_id = + pbr_bindings::material_array[material_indices[slot].material].deferred_lighting_pass_id; #else // BINDLESS - let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu; let flags = pbr_bindings::material.flags; let base_color = pbr_bindings::material.base_color; let deferred_lighting_pass_id = pbr_bindings::material.deferred_lighting_pass_id; @@ -109,7 +115,7 @@ fn pbr_input_from_standard_material( #ifdef VERTEX_UVS #ifdef BINDLESS - let uv_transform = pbr_bindings::material[slot].uv_transform; + let uv_transform = pbr_bindings::material_array[material_indices[slot].material].uv_transform; #else // BINDLESS let uv_transform = pbr_bindings::material.uv_transform; #endif // BINDLESS @@ -138,9 +144,9 @@ fn pbr_input_from_standard_material( // TODO: Transforming UVs mean we need to apply derivative chain rule for meshlet mesh material pass uv = parallaxed_uv( #ifdef BINDLESS - pbr_bindings::material[slot].parallax_depth_scale, - pbr_bindings::material[slot].max_parallax_layer_count, - pbr_bindings::material[slot].max_relief_mapping_search_steps, + pbr_bindings::material_array[material_indices[slot].material].parallax_depth_scale, + pbr_bindings::material_array[material_indices[slot].material].max_parallax_layer_count, + pbr_bindings::material_array[material_indices[slot].material].max_relief_mapping_search_steps, #else // BINDLESS pbr_bindings::material.parallax_depth_scale, pbr_bindings::material.max_parallax_layer_count, @@ -159,9 +165,9 @@ fn pbr_input_from_standard_material( // TODO: Transforming UVs mean we need to apply derivative chain rule for meshlet mesh material pass uv_b = parallaxed_uv( #ifdef BINDLESS - pbr_bindings::material[slot].parallax_depth_scale, - pbr_bindings::material[slot].max_parallax_layer_count, - pbr_bindings::material[slot].max_relief_mapping_search_steps, + pbr_bindings::material_array[material_indices[slot].material].parallax_depth_scale, + pbr_bindings::material_array[material_indices[slot].material].max_parallax_layer_count, + pbr_bindings::material_array[material_indices[slot].material].max_relief_mapping_search_steps, #else // BINDLESS pbr_bindings::material.parallax_depth_scale, pbr_bindings::material.max_parallax_layer_count, @@ -188,8 +194,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::base_color_texture[slot], - pbr_bindings::base_color_sampler[slot], + bindless_textures_2d[material_indices[slot].base_color_texture], + bindless_samplers_filtering[material_indices[slot].base_color_sampler], #else // BINDLESS pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, @@ -215,7 +221,7 @@ fn pbr_input_from_standard_material( if alpha_mode == 
pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE { #ifdef BINDLESS - let alpha_cutoff = pbr_bindings::material[slot].alpha_cutoff; + let alpha_cutoff = pbr_bindings::material_array[material_indices[slot].material].alpha_cutoff; #else // BINDLESS let alpha_cutoff = pbr_bindings::material.alpha_cutoff; #endif // BINDLESS @@ -233,10 +239,13 @@ fn pbr_input_from_standard_material( // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit if ((flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { #ifdef BINDLESS - pbr_input.material.ior = pbr_bindings::material[slot].ior; - pbr_input.material.attenuation_color = pbr_bindings::material[slot].attenuation_color; - pbr_input.material.attenuation_distance = pbr_bindings::material[slot].attenuation_distance; - pbr_input.material.alpha_cutoff = pbr_bindings::material[slot].alpha_cutoff; + pbr_input.material.ior = pbr_bindings::material_array[material_indices[slot].material].ior; + pbr_input.material.attenuation_color = + pbr_bindings::material_array[material_indices[slot].material].attenuation_color; + pbr_input.material.attenuation_distance = + pbr_bindings::material_array[material_indices[slot].material].attenuation_distance; + pbr_input.material.alpha_cutoff = + pbr_bindings::material_array[material_indices[slot].material].alpha_cutoff; #else // BINDLESS pbr_input.material.ior = pbr_bindings::material.ior; pbr_input.material.attenuation_color = pbr_bindings::material.attenuation_color; @@ -246,7 +255,8 @@ fn pbr_input_from_standard_material( // reflectance #ifdef BINDLESS - pbr_input.material.reflectance = pbr_bindings::material[slot].reflectance; + pbr_input.material.reflectance = + pbr_bindings::material_array[material_indices[slot].material].reflectance; #else // BINDLESS pbr_input.material.reflectance = pbr_bindings::material.reflectance; #endif // BINDLESS @@ -263,8 +273,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::specular_texture[slot], - pbr_bindings::specular_sampler[slot], + bindless_textures_2d[material_indices[slot].specular_texture], + bindless_samplers_filtering[material_indices[slot].specular_sampler], #else // BINDLESS pbr_bindings::specular_texture, pbr_bindings::specular_sampler, @@ -295,8 +305,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::specular_tint_texture[slot], - pbr_bindings::specular_tint_sampler[slot], + bindless_textures_2d[material_indices[slot].specular_tint_texture], + bindless_samplers_filtering[material_indices[slot].specular_tint_sampler], #else // BINDLESS pbr_bindings::specular_tint_texture, pbr_bindings::specular_tint_sampler, @@ -321,7 +331,7 @@ fn pbr_input_from_standard_material( // emissive #ifdef BINDLESS - var emissive: vec4 = pbr_bindings::material[slot].emissive; + var emissive: vec4 = pbr_bindings::material_array[material_indices[slot].material].emissive; #else // BINDLESS var emissive: vec4 = pbr_bindings::material.emissive; #endif // BINDLESS @@ -335,8 +345,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::emissive_texture[slot], - pbr_bindings::emissive_sampler[slot], + bindless_textures_2d[material_indices[slot].emissive_texture], + bindless_samplers_filtering[material_indices[slot].emissive_sampler], #else // BINDLESS pbr_bindings::emissive_texture, pbr_bindings::emissive_sampler, @@ -360,8 +370,8 @@ fn 
pbr_input_from_standard_material( // metallic and perceptual roughness #ifdef BINDLESS - var metallic: f32 = pbr_bindings::material[slot].metallic; - var perceptual_roughness: f32 = pbr_bindings::material[slot].perceptual_roughness; + var metallic: f32 = pbr_bindings::material_array[material_indices[slot].material].metallic; + var perceptual_roughness: f32 = pbr_bindings::material_array[material_indices[slot].material].perceptual_roughness; #else // BINDLESS var metallic: f32 = pbr_bindings::material.metallic; var perceptual_roughness: f32 = pbr_bindings::material.perceptual_roughness; @@ -377,8 +387,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::metallic_roughness_texture[slot], - pbr_bindings::metallic_roughness_sampler[slot], + bindless_textures_2d[material_indices[slot].metallic_roughness_texture], + bindless_samplers_filtering[material_indices[slot].metallic_roughness_sampler], #else // BINDLESS pbr_bindings::metallic_roughness_texture, pbr_bindings::metallic_roughness_sampler, @@ -405,7 +415,8 @@ fn pbr_input_from_standard_material( // Clearcoat factor #ifdef BINDLESS - pbr_input.material.clearcoat = pbr_bindings::material[slot].clearcoat; + pbr_input.material.clearcoat = + pbr_bindings::material_array[material_indices[slot].material].clearcoat; #else // BINDLESS pbr_input.material.clearcoat = pbr_bindings::material.clearcoat; #endif // BINDLESS @@ -420,8 +431,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::clearcoat_texture[slot], - pbr_bindings::clearcoat_sampler[slot], + bindless_textures_2d[material_indices[slot].clearcoat_texture], + bindless_samplers_filtering[material_indices[slot].clearcoat_sampler], #else // BINDLESS pbr_bindings::clearcoat_texture, pbr_bindings::clearcoat_sampler, @@ -445,7 +456,7 @@ fn pbr_input_from_standard_material( // Clearcoat roughness #ifdef BINDLESS pbr_input.material.clearcoat_perceptual_roughness = - pbr_bindings::material[slot].clearcoat_perceptual_roughness; + pbr_bindings::material_array[material_indices[slot].material].clearcoat_perceptual_roughness; #else // BINDLESS pbr_input.material.clearcoat_perceptual_roughness = pbr_bindings::material.clearcoat_perceptual_roughness; @@ -461,8 +472,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::clearcoat_roughness_texture[slot], - pbr_bindings::clearcoat_roughness_sampler[slot], + bindless_textures_2d[material_indices[slot].clearcoat_roughness_texture], + bindless_samplers_filtering[material_indices[slot].clearcoat_roughness_sampler], #else // BINDLESS pbr_bindings::clearcoat_roughness_texture, pbr_bindings::clearcoat_roughness_sampler, @@ -484,7 +495,7 @@ fn pbr_input_from_standard_material( #endif // VERTEX_UVS #ifdef BINDLESS - var specular_transmission: f32 = pbr_bindings::material[slot].specular_transmission; + var specular_transmission: f32 = pbr_bindings::material_array[slot].specular_transmission; #else // BINDLESS var specular_transmission: f32 = pbr_bindings::material.specular_transmission; #endif // BINDLESS @@ -499,8 +510,12 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::specular_transmission_texture[slot], - pbr_bindings::specular_transmission_sampler[slot], + bindless_textures_2d[ + material_indices[slot].specular_transmission_texture + ], + 
bindless_samplers_filtering[ + material_indices[slot].specular_transmission_sampler + ], #else // BINDLESS pbr_bindings::specular_transmission_texture, pbr_bindings::specular_transmission_sampler, @@ -523,7 +538,7 @@ fn pbr_input_from_standard_material( pbr_input.material.specular_transmission = specular_transmission; #ifdef BINDLESS - var thickness: f32 = pbr_bindings::material[slot].thickness; + var thickness: f32 = pbr_bindings::material_array[material_indices[slot].material].thickness; #else // BINDLESS var thickness: f32 = pbr_bindings::material.thickness; #endif // BINDLESS @@ -538,8 +553,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::thickness_texture[slot], - pbr_bindings::thickness_sampler[slot], + bindless_textures_2d[material_indices[slot].thickness_texture], + bindless_samplers_filtering[material_indices[slot].thickness_sampler], #else // BINDLESS pbr_bindings::thickness_texture, pbr_bindings::thickness_sampler, @@ -569,7 +584,8 @@ fn pbr_input_from_standard_material( pbr_input.material.thickness = thickness; #ifdef BINDLESS - var diffuse_transmission = pbr_bindings::material[slot].diffuse_transmission; + var diffuse_transmission = + pbr_bindings::material_array[material_indices[slot].material].diffuse_transmission; #else // BINDLESS var diffuse_transmission = pbr_bindings::material.diffuse_transmission; #endif // BINDLESS @@ -584,8 +600,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::diffuse_transmission_texture[slot], - pbr_bindings::diffuse_transmission_sampler[slot], + bindless_textures_2d[material_indices[slot].diffuse_transmission_texture], + bindless_samplers_filtering[material_indices[slot].diffuse_transmission_sampler], #else // BINDLESS pbr_bindings::diffuse_transmission_texture, pbr_bindings::diffuse_transmission_sampler, @@ -618,8 +634,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::occlusion_texture[slot], - pbr_bindings::occlusion_sampler[slot], + bindless_textures_2d[material_indices[slot].occlusion_texture], + bindless_samplers_filtering[material_indices[slot].occlusion_sampler], #else // BINDLESS pbr_bindings::occlusion_texture, pbr_bindings::occlusion_sampler, @@ -669,8 +685,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::normal_map_texture[slot], - pbr_bindings::normal_map_sampler[slot], + bindless_textures_2d[material_indices[slot].normal_map_texture], + bindless_samplers_filtering[material_indices[slot].normal_map_sampler], #else // BINDLESS pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, @@ -707,8 +723,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::clearcoat_normal_texture[slot], - pbr_bindings::clearcoat_normal_sampler[slot], + bindless_textures_2d[material_indices[slot].clearcoat_normal_texture], + bindless_samplers_filtering[material_indices[slot].clearcoat_normal_sampler], #else // BINDLESS pbr_bindings::clearcoat_normal_texture, pbr_bindings::clearcoat_normal_sampler, @@ -750,8 +766,10 @@ fn pbr_input_from_standard_material( #ifdef STANDARD_MATERIAL_ANISOTROPY #ifdef BINDLESS - var anisotropy_strength = pbr_bindings::material[slot].anisotropy_strength; - var anisotropy_direction = 
pbr_bindings::material[slot].anisotropy_rotation; + var anisotropy_strength = + pbr_bindings::material_array[material_indices[slot].material].anisotropy_strength; + var anisotropy_direction = + pbr_bindings::material_array[material_indices[slot].material].anisotropy_rotation; #else // BINDLESS var anisotropy_strength = pbr_bindings::material.anisotropy_strength; var anisotropy_direction = pbr_bindings::material.anisotropy_rotation; @@ -766,8 +784,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::anisotropy_texture[slot], - pbr_bindings::anisotropy_sampler[slot], + bindless_textures_2d[material_indices[slot].anisotropy_texture], + bindless_samplers_filtering[material_indices[slot].anisotropy_sampler], #else // BINDLESS pbr_bindings::anisotropy_texture, pbr_bindings::anisotropy_sampler, @@ -810,7 +828,8 @@ fn pbr_input_from_standard_material( #ifdef LIGHTMAP #ifdef BINDLESS - let lightmap_exposure = pbr_bindings::material[slot].lightmap_exposure; + let lightmap_exposure = + pbr_bindings::material_array[material_indices[slot].material].lightmap_exposure; #else // BINDLESS let lightmap_exposure = pbr_bindings::material.lightmap_exposure; #endif // BINDLESS diff --git a/crates/bevy_pbr/src/render/pbr_functions.wgsl b/crates/bevy_pbr/src/render/pbr_functions.wgsl index 44890b3a65..dcda30ee79 100644 --- a/crates/bevy_pbr/src/render/pbr_functions.wgsl +++ b/crates/bevy_pbr/src/render/pbr_functions.wgsl @@ -241,7 +241,7 @@ fn bend_normal_for_anisotropy(lighting_input: ptr = pbr_bindings::material[slot].base_color; + var output_color: vec4 = pbr_bindings::material_array[material_indices[slot].material].base_color; + let flags = pbr_bindings::material_array[material_indices[slot].material].flags; #else // BINDLESS var output_color: vec4 = pbr_bindings::material.base_color; + let flags = pbr_bindings::material.flags; #endif // BINDLESS #ifdef VERTEX_UVS @@ -31,19 +39,17 @@ fn prepass_alpha_discard(in: VertexOutput) { #endif // STANDARD_MATERIAL_BASE_COLOR_UV_B #ifdef BINDLESS - let uv_transform = pbr_bindings::material[slot].uv_transform; - let flags = pbr_bindings::material[slot].flags; + let uv_transform = pbr_bindings::material_array[material_indices[slot].material].uv_transform; #else // BINDLESS let uv_transform = pbr_bindings::material.uv_transform; - let flags = pbr_bindings::material.flags; #endif // BINDLESS uv = (uv_transform * vec3(uv, 1.0)).xy; if (flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u { output_color = output_color * textureSampleBias( #ifdef BINDLESS - pbr_bindings::base_color_texture[slot], - pbr_bindings::base_color_sampler[slot], + bindless_textures_2d[material_indices[slot].base_color_texture], + bindless_samplers_filtering[material_indices[slot].base_color_sampler], #else // BINDLESS pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, @@ -57,7 +63,7 @@ fn prepass_alpha_discard(in: VertexOutput) { let alpha_mode = flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK { #ifdef BINDLESS - let alpha_cutoff = pbr_bindings::material[slot].alpha_cutoff; + let alpha_cutoff = pbr_bindings::material_array[material_indices[slot].material].alpha_cutoff; #else // BINDLESS let alpha_cutoff = pbr_bindings::material.alpha_cutoff; #endif // BINDLESS diff --git a/crates/bevy_pbr/src/render/skin.rs b/crates/bevy_pbr/src/render/skin.rs index c248821cca..476e06c1e7 100644 --- 
a/crates/bevy_pbr/src/render/skin.rs +++ b/crates/bevy_pbr/src/render/skin.rs @@ -1,19 +1,24 @@ use core::mem::{self, size_of}; use std::sync::OnceLock; -use bevy_asset::Assets; +use bevy_asset::{prelude::AssetChanged, Assets}; use bevy_ecs::prelude::*; use bevy_math::Mat4; -use bevy_render::sync_world::MainEntityHashMap; +use bevy_platform::collections::hash_map::Entry; +use bevy_render::render_resource::{Buffer, BufferDescriptor}; +use bevy_render::sync_world::{MainEntity, MainEntityHashMap, MainEntityHashSet}; use bevy_render::{ batching::NoAutomaticBatching, mesh::skinning::{SkinnedMesh, SkinnedMeshInverseBindposes}, - render_resource::{BufferUsages, RawBufferVec}, + render_resource::BufferUsages, renderer::{RenderDevice, RenderQueue}, view::ViewVisibility, Extract, }; use bevy_transform::prelude::GlobalTransform; +use offset_allocator::{Allocation, Allocator}; +use smallvec::SmallVec; +use tracing::error; /// Maximum number of joints supported for skinned meshes. /// @@ -24,18 +29,40 @@ use bevy_transform::prelude::GlobalTransform; /// of the GPU at runtime, which would mean not using consts anymore. pub const MAX_JOINTS: usize = 256; +/// The total number of joints we support. +/// +/// This is 256 GiB worth of joint matrices, which we will never hit under any +/// reasonable circumstances. +const MAX_TOTAL_JOINTS: u32 = 1024 * 1024 * 1024; + +/// The number of joints that we allocate at a time. +/// +/// Some hardware requires that uniforms be allocated on 256-byte boundaries, so +/// we need to allocate 4 64-byte matrices at a time to satisfy alignment +/// requirements. +const JOINTS_PER_ALLOCATION_UNIT: u32 = (256 / size_of::()) as u32; + +/// The maximum ratio of the number of entities whose transforms changed to the +/// total number of joints before we re-extract all joints. +/// +/// We use this as a heuristic to decide whether it's worth switching over to +/// fine-grained detection to determine which skins need extraction. If the +/// number of changed entities is over this threshold, we skip change detection +/// and simply re-extract the transforms of all joints. +const JOINT_EXTRACTION_THRESHOLD_FACTOR: f64 = 0.25; + /// The location of the first joint matrix in the skin uniform buffer. -#[derive(Component)] -pub struct SkinIndex { +#[derive(Clone, Copy)] +pub struct SkinByteOffset { /// The byte offset of the first joint matrix. pub byte_offset: u32, } -impl SkinIndex { +impl SkinByteOffset { /// Index to be in address space based on the size of a skin uniform. - const fn new(start: usize) -> Self { - SkinIndex { - byte_offset: (start * size_of::()) as u32, + const fn from_index(index: usize) -> Self { + SkinByteOffset { + byte_offset: (index * size_of::()) as u32, } } @@ -47,22 +74,6 @@ impl SkinIndex { } } -/// Maps each skinned mesh to the applicable offset within the [`SkinUniforms`] -/// buffer. -/// -/// We store both the current frame's joint matrices and the previous frame's -/// joint matrices for the purposes of motion vector calculation. -#[derive(Default, Resource)] -pub struct SkinIndices { - /// Maps each skinned mesh to the applicable offset within - /// [`SkinUniforms::current_buffer`]. - pub current: MainEntityHashMap, - - /// Maps each skinned mesh to the applicable offset within - /// [`SkinUniforms::prev_buffer`]. - pub prev: MainEntityHashMap, -} - /// The GPU buffers containing joint matrices for all skinned meshes. 
/// /// This is double-buffered: we store the joint matrices of each mesh for the @@ -74,28 +85,109 @@ pub struct SkinIndices { /// Notes on implementation: see comment on top of the `extract_skins` system. #[derive(Resource)] pub struct SkinUniforms { - /// Stores all the joint matrices for skinned meshes in the current frame. - pub current_buffer: RawBufferVec, - /// Stores all the joint matrices for skinned meshes in the previous frame. - pub prev_buffer: RawBufferVec, + /// The CPU-side buffer that stores the joint matrices for skinned meshes in + /// the current frame. + pub current_staging_buffer: Vec, + /// The GPU-side buffer that stores the joint matrices for skinned meshes in + /// the current frame. + pub current_buffer: Buffer, + /// The GPU-side buffer that stores the joint matrices for skinned meshes in + /// the previous frame. + pub prev_buffer: Buffer, + /// The offset allocator that manages the placement of the joints within the + /// [`Self::current_buffer`]. + allocator: Allocator, + /// Allocation information that we keep about each skin. + skin_uniform_info: MainEntityHashMap, + /// Maps each joint entity to the skins it's associated with. + /// + /// We use this in conjunction with change detection to only update the + /// skins that need updating each frame. + /// + /// Note that conceptually this is a hash map of sets, but we use a + /// [`SmallVec`] to avoid allocations for the vast majority of the cases in + /// which each bone belongs to exactly one skin. + joint_to_skins: MainEntityHashMap>, + /// The total number of joints in the scene. + /// + /// We use this as part of our heuristic to decide whether to use + /// fine-grained change detection. + total_joints: usize, } impl FromWorld for SkinUniforms { fn from_world(world: &mut World) -> Self { let device = world.resource::(); - let buffer_usages = if skins_use_uniform_buffers(device) { + let buffer_usages = (if skins_use_uniform_buffers(device) { BufferUsages::UNIFORM } else { BufferUsages::STORAGE - }; + }) | BufferUsages::COPY_DST; + + // Create the current and previous buffer with the minimum sizes. + // + // These will be swapped every frame. + let current_buffer = device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + size: MAX_JOINTS as u64 * size_of::() as u64, + usage: buffer_usages, + mapped_at_creation: false, + }); + let prev_buffer = device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + size: MAX_JOINTS as u64 * size_of::() as u64, + usage: buffer_usages, + mapped_at_creation: false, + }); Self { - current_buffer: RawBufferVec::new(buffer_usages), - prev_buffer: RawBufferVec::new(buffer_usages), + current_staging_buffer: vec![], + current_buffer, + prev_buffer, + allocator: Allocator::new(MAX_TOTAL_JOINTS), + skin_uniform_info: MainEntityHashMap::default(), + joint_to_skins: MainEntityHashMap::default(), + total_joints: 0, } } } +impl SkinUniforms { + /// Returns the current offset in joints of the skin in the buffer. + pub fn skin_index(&self, skin: MainEntity) -> Option { + self.skin_uniform_info + .get(&skin) + .map(SkinUniformInfo::offset) + } + + /// Returns the current offset in bytes of the skin in the buffer. + pub fn skin_byte_offset(&self, skin: MainEntity) -> Option { + self.skin_uniform_info.get(&skin).map(|skin_uniform_info| { + SkinByteOffset::from_index(skin_uniform_info.offset() as usize) + }) + } + + /// Returns an iterator over all skins in the scene. 
+ pub fn all_skins(&self) -> impl Iterator { + self.skin_uniform_info.keys() + } +} + +/// Allocation information about each skin. +struct SkinUniformInfo { + /// The allocation of the joints within the [`SkinUniforms::current_buffer`]. + allocation: Allocation, + /// The entities that comprise the joints. + joints: Vec, +} + +impl SkinUniformInfo { + /// The offset in joints within the [`SkinUniforms::current_staging_buffer`]. + fn offset(&self) -> u32 { + self.allocation.offset * JOINTS_PER_ALLOCATION_UNIT + } +} + /// Returns true if skinning must use uniforms (and dynamic offsets) because /// storage buffers aren't supported on the current platform. pub fn skins_use_uniform_buffers(render_device: &RenderDevice) -> bool { @@ -104,20 +196,72 @@ pub fn skins_use_uniform_buffers(render_device: &RenderDevice) -> bool { .get_or_init(|| render_device.limits().max_storage_buffers_per_shader_stage == 0) } +/// Uploads the buffers containing the joints to the GPU. pub fn prepare_skins( render_device: Res, render_queue: Res, - mut uniform: ResMut, + uniform: ResMut, ) { - if uniform.current_buffer.is_empty() { + let uniform = uniform.into_inner(); + + if uniform.current_staging_buffer.is_empty() { return; } - let len = uniform.current_buffer.len(); - uniform.current_buffer.reserve(len, &render_device); - uniform - .current_buffer - .write_buffer(&render_device, &render_queue); + // Swap current and previous buffers. + mem::swap(&mut uniform.current_buffer, &mut uniform.prev_buffer); + + // Resize the buffers if necessary. Include extra space equal to `MAX_JOINTS` + // because we need to be able to bind a full uniform buffer's worth of data + // if skins use uniform buffers on this platform. + let needed_size = (uniform.current_staging_buffer.len() as u64 + MAX_JOINTS as u64) + * size_of::() as u64; + if uniform.current_buffer.size() < needed_size { + let mut new_size = uniform.current_buffer.size(); + while new_size < needed_size { + // 1.5× growth factor. + new_size += new_size / 2; + } + + // Create the new buffers. + let buffer_usages = if skins_use_uniform_buffers(&render_device) { + BufferUsages::UNIFORM + } else { + BufferUsages::STORAGE + } | BufferUsages::COPY_DST; + uniform.current_buffer = render_device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + usage: buffer_usages, + size: new_size, + mapped_at_creation: false, + }); + uniform.prev_buffer = render_device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + usage: buffer_usages, + size: new_size, + mapped_at_creation: false, + }); + + // We've created a new `prev_buffer` but we don't have the previous joint + // data needed to fill it out correctly. Use the current joint data + // instead. + // + // TODO: This is a bug - will cause motion blur to ignore joint movement + // for one frame. + render_queue.write_buffer( + &uniform.prev_buffer, + 0, + bytemuck::must_cast_slice(&uniform.current_staging_buffer[..]), + ); + } + + // Write the data from `uniform.current_staging_buffer` into + // `uniform.current_buffer`. + render_queue.write_buffer( + &uniform.current_buffer, + 0, + bytemuck::must_cast_slice(&uniform.current_staging_buffer[..]), + ); // We don't need to write `uniform.prev_buffer` because we already wrote it // last frame, and the data should still be on the GPU. @@ -150,71 +294,320 @@ pub fn prepare_skins( // which normally only support fixed size arrays. You just have to make sure // in the shader that you only read the values that are valid for that binding. 
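As a reading aid for the new `SkinUniforms` lookup API above: a minimal sketch, assuming it is called from render-world code that already has the `SkinUniforms` resource and a mesh's `MainEntity` in hand. The function name and tuple return are illustrative only and are not part of this patch.

    use bevy_render::sync_world::MainEntity;

    /// Hedged sketch: find where a skinned mesh's joint matrices live this frame.
    fn joint_location(skin_uniforms: &SkinUniforms, entity: MainEntity) -> Option<(u32, u32)> {
        // Offset in whole matrices, suitable for indexing on the storage-buffer path.
        let index = skin_uniforms.skin_index(entity)?;
        // Offset in bytes, suitable as the dynamic uniform offset on platforms
        // where storage buffers are unavailable (see the note above).
        let byte_offset = skin_uniforms.skin_byte_offset(entity)?.byte_offset;
        Some((index, byte_offset))
    }

On the uniform-buffer path, that byte offset is what gets handed to `set_bind_group` as a dynamic offset, while the shader keeps binding a fixed `MAX_JOINTS`-sized array and only reads the entries that are valid for that skin.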
pub fn extract_skins( - skin_indices: ResMut, - uniform: ResMut, - query: Extract>, - inverse_bindposes: Extract>>, + skin_uniforms: ResMut, + skinned_meshes: Extract>, + changed_skinned_meshes: Extract< + Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + >, + skinned_mesh_inverse_bindposes: Extract>>, + changed_transforms: Extract>>, joints: Extract>, - render_device: Res, + mut removed_visibilities_query: Extract>, + mut removed_skinned_meshes_query: Extract>, ) { - let skins_use_uniform_buffers = skins_use_uniform_buffers(&render_device); + let skin_uniforms = skin_uniforms.into_inner(); - // Borrow check workaround. - let (skin_indices, uniform) = (skin_indices.into_inner(), uniform.into_inner()); + // Find skins that have become visible or invisible on this frame. Allocate, + // reallocate, or free space for them as necessary. + add_or_delete_skins( + skin_uniforms, + &changed_skinned_meshes, + &skinned_mesh_inverse_bindposes, + &joints, + ); - // Swap buffers. We need to keep the previous frame's buffer around for the - // purposes of motion vector computation. - mem::swap(&mut skin_indices.current, &mut skin_indices.prev); - mem::swap(&mut uniform.current_buffer, &mut uniform.prev_buffer); - skin_indices.current.clear(); - uniform.current_buffer.clear(); + // Extract the transforms for all joints from the scene, and write them into + // the staging buffer at the appropriate spot. + extract_joints( + skin_uniforms, + &skinned_meshes, + &changed_skinned_meshes, + &skinned_mesh_inverse_bindposes, + &changed_transforms, + &joints, + ); - let mut last_start = 0; + // Delete skins that became invisible. + for skinned_mesh_entity in removed_visibilities_query + .read() + .chain(removed_skinned_meshes_query.read()) + { + // Only remove a skin if we didn't pick it up in `add_or_delete_skins`. + // It's possible that a necessary component was removed and re-added in + // the same frame. + if !changed_skinned_meshes.contains(skinned_mesh_entity) { + remove_skin(skin_uniforms, skinned_mesh_entity.into()); + } + } +} - // PERF: This can be expensive, can we move this to prepare? - for (entity, view_visibility, skin) in &query { - if !view_visibility.get() { +/// Searches for all skins that have become visible or invisible this frame and +/// allocations for them as necessary. +fn add_or_delete_skins( + skin_uniforms: &mut SkinUniforms, + changed_skinned_meshes: &Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + skinned_mesh_inverse_bindposes: &Assets, + joints: &Query<&GlobalTransform>, +) { + // Find every skinned mesh that changed one of (1) visibility; (2) joint + // entities (part of `SkinnedMesh`); (3) the associated + // `SkinnedMeshInverseBindposes` asset. + for (skinned_mesh_entity, skinned_mesh_view_visibility, skinned_mesh) in changed_skinned_meshes + { + // Remove the skin if it existed last frame. + let skinned_mesh_entity = MainEntity::from(skinned_mesh_entity); + remove_skin(skin_uniforms, skinned_mesh_entity); + + // If the skin is invisible, we're done. + if !(*skinned_mesh_view_visibility).get() { continue; } - let buffer = &mut uniform.current_buffer; - let Some(inverse_bindposes) = inverse_bindposes.get(&skin.inverse_bindposes) else { + + // Initialize the skin. 
+ add_skin( + skinned_mesh_entity, + skinned_mesh, + skin_uniforms, + skinned_mesh_inverse_bindposes, + joints, + ); + } +} + +/// Extracts the global transforms of all joints and updates the staging buffer +/// as necessary. +fn extract_joints( + skin_uniforms: &mut SkinUniforms, + skinned_meshes: &Query<(Entity, &SkinnedMesh)>, + changed_skinned_meshes: &Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + skinned_mesh_inverse_bindposes: &Assets, + changed_transforms: &Query<(Entity, &GlobalTransform), Changed>, + joints: &Query<&GlobalTransform>, +) { + // If the number of entities that changed transforms exceeds a certain + // fraction (currently 25%) of the total joints in the scene, then skip + // fine-grained change detection. + // + // Note that this is a crude heuristic, for performance reasons. It doesn't + // consider the ratio of modified *joints* to total joints, only the ratio + // of modified *entities* to total joints. Thus in the worst case we might + // end up re-extracting all skins even though none of the joints changed. + // But making the heuristic finer-grained would make it slower to evaluate, + // and we don't want to lose performance. + let threshold = + (skin_uniforms.total_joints as f64 * JOINT_EXTRACTION_THRESHOLD_FACTOR).floor() as usize; + + if changed_transforms.iter().nth(threshold).is_some() { + // Go ahead and re-extract all skins in the scene. + for (skin_entity, skin) in skinned_meshes { + extract_joints_for_skin( + skin_entity.into(), + skin, + skin_uniforms, + changed_skinned_meshes, + skinned_mesh_inverse_bindposes, + joints, + ); + } + return; + } + + // Use fine-grained change detection to figure out only the skins that need + // to have their joints re-extracted. + let dirty_skins: MainEntityHashSet = changed_transforms + .iter() + .flat_map(|(joint, _)| skin_uniforms.joint_to_skins.get(&MainEntity::from(joint))) + .flat_map(|skin_joint_mappings| skin_joint_mappings.iter()) + .copied() + .collect(); + + // Re-extract the joints for only those skins. + for skin_entity in dirty_skins { + let Ok((_, skin)) = skinned_meshes.get(*skin_entity) else { continue; }; - let start = buffer.len(); - - let target = start + skin.joints.len().min(MAX_JOINTS); - buffer.extend( - joints - .iter_many(&skin.joints) - .zip(inverse_bindposes.iter()) - .take(MAX_JOINTS) - .map(|(joint, bindpose)| joint.affine() * *bindpose), + extract_joints_for_skin( + skin_entity, + skin, + skin_uniforms, + changed_skinned_meshes, + skinned_mesh_inverse_bindposes, + joints, ); - // iter_many will skip any failed fetches. This will cause it to assign the wrong bones, - // so just bail by truncating to the start. - if buffer.len() != target { - buffer.truncate(start); - continue; - } - last_start = last_start.max(start); + } +} - // Pad to 256 byte alignment if we're using a uniform buffer. - // There's no need to do this if we're using storage buffers, though. - if skins_use_uniform_buffers { - while buffer.len() % 4 != 0 { - buffer.push(Mat4::ZERO); +/// Extracts all joints for a single skin and writes their transforms into the +/// CPU staging buffer. 
+fn extract_joints_for_skin( + skin_entity: MainEntity, + skin: &SkinnedMesh, + skin_uniforms: &mut SkinUniforms, + changed_skinned_meshes: &Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + skinned_mesh_inverse_bindposes: &Assets, + joints: &Query<&GlobalTransform>, +) { + // If we initialized the skin this frame, we already populated all + // the joints, so there's no need to populate them again. + if changed_skinned_meshes.contains(*skin_entity) { + return; + } + + // Fetch information about the skin. + let Some(skin_uniform_info) = skin_uniforms.skin_uniform_info.get(&skin_entity) else { + return; + }; + let Some(skinned_mesh_inverse_bindposes) = + skinned_mesh_inverse_bindposes.get(&skin.inverse_bindposes) + else { + return; + }; + + // Calculate and write in the new joint matrices. + for (joint_index, (&joint, skinned_mesh_inverse_bindpose)) in skin + .joints + .iter() + .zip(skinned_mesh_inverse_bindposes.iter()) + .enumerate() + { + let Ok(joint_transform) = joints.get(joint) else { + continue; + }; + + let joint_matrix = joint_transform.affine() * *skinned_mesh_inverse_bindpose; + skin_uniforms.current_staging_buffer[skin_uniform_info.offset() as usize + joint_index] = + joint_matrix; + } +} + +/// Allocates space for a new skin in the buffers, and populates its joints. +fn add_skin( + skinned_mesh_entity: MainEntity, + skinned_mesh: &SkinnedMesh, + skin_uniforms: &mut SkinUniforms, + skinned_mesh_inverse_bindposes: &Assets, + joints: &Query<&GlobalTransform>, +) { + // Allocate space for the joints. + let Some(allocation) = skin_uniforms.allocator.allocate( + skinned_mesh + .joints + .len() + .div_ceil(JOINTS_PER_ALLOCATION_UNIT as usize) as u32, + ) else { + error!( + "Out of space for skin: {:?}. Tried to allocate space for {:?} joints.", + skinned_mesh_entity, + skinned_mesh.joints.len() + ); + return; + }; + + // Store that allocation. + let skin_uniform_info = SkinUniformInfo { + allocation, + joints: skinned_mesh + .joints + .iter() + .map(|entity| MainEntity::from(*entity)) + .collect(), + }; + + let skinned_mesh_inverse_bindposes = + skinned_mesh_inverse_bindposes.get(&skinned_mesh.inverse_bindposes); + + for (joint_index, &joint) in skinned_mesh.joints.iter().enumerate() { + // Calculate the initial joint matrix. + let skinned_mesh_inverse_bindpose = + skinned_mesh_inverse_bindposes.and_then(|skinned_mesh_inverse_bindposes| { + skinned_mesh_inverse_bindposes.get(joint_index) + }); + let joint_matrix = match (skinned_mesh_inverse_bindpose, joints.get(joint)) { + (Some(skinned_mesh_inverse_bindpose), Ok(transform)) => { + transform.affine() * *skinned_mesh_inverse_bindpose + } + _ => Mat4::IDENTITY, + }; + + // Write in the new joint matrix, growing the staging buffer if + // necessary. + let buffer_index = skin_uniform_info.offset() as usize + joint_index; + if skin_uniforms.current_staging_buffer.len() < buffer_index + 1 { + skin_uniforms + .current_staging_buffer + .resize(buffer_index + 1, Mat4::IDENTITY); + } + skin_uniforms.current_staging_buffer[buffer_index] = joint_matrix; + + // Record the inverse mapping from the joint back to the skin. We use + // this in order to perform fine-grained joint extraction. + skin_uniforms + .joint_to_skins + .entry(MainEntity::from(joint)) + .or_default() + .push(skinned_mesh_entity); + } + + // Record the number of joints. 
+ skin_uniforms.total_joints += skinned_mesh.joints.len(); + + skin_uniforms + .skin_uniform_info + .insert(skinned_mesh_entity, skin_uniform_info); +} + +/// Deallocates a skin and removes it from the [`SkinUniforms`]. +fn remove_skin(skin_uniforms: &mut SkinUniforms, skinned_mesh_entity: MainEntity) { + let Some(old_skin_uniform_info) = skin_uniforms.skin_uniform_info.remove(&skinned_mesh_entity) + else { + return; + }; + + // Free the allocation. + skin_uniforms + .allocator + .free(old_skin_uniform_info.allocation); + + // Remove the inverse mapping from each joint back to the skin. + for &joint in &old_skin_uniform_info.joints { + if let Entry::Occupied(mut entry) = skin_uniforms.joint_to_skins.entry(joint) { + entry.get_mut().retain(|skin| *skin != skinned_mesh_entity); + if entry.get_mut().is_empty() { + entry.remove(); } } - - skin_indices - .current - .insert(entity.into(), SkinIndex::new(start)); } - // Pad out the buffer to ensure that there's enough space for bindings - while uniform.current_buffer.len() - last_start < MAX_JOINTS { - uniform.current_buffer.push(Mat4::ZERO); - } + // Update the total number of joints. + skin_uniforms.total_joints -= old_skin_uniform_info.joints.len(); } // NOTE: The skinned joints uniform buffer has to be bound at a dynamic offset per diff --git a/crates/bevy_pbr/src/render/skinning.wgsl b/crates/bevy_pbr/src/render/skinning.wgsl index 92e977aeb1..1762a73887 100644 --- a/crates/bevy_pbr/src/render/skinning.wgsl +++ b/crates/bevy_pbr/src/render/skinning.wgsl @@ -34,7 +34,7 @@ fn skin_model( + weights.z * joint_matrices.data[indexes.z] + weights.w * joint_matrices.data[indexes.w]; #else // SKINS_USE_UNIFORM_BUFFERS - let skin_index = mesh[instance_index].current_skin_index; + var skin_index = mesh[instance_index].current_skin_index; return weights.x * joint_matrices[skin_index + indexes.x] + weights.y * joint_matrices[skin_index + indexes.y] + weights.z * joint_matrices[skin_index + indexes.z] @@ -57,7 +57,7 @@ fn skin_prev_model( + weights.z * prev_joint_matrices.data[indexes.z] + weights.w * prev_joint_matrices.data[indexes.w]; #else // SKINS_USE_UNIFORM_BUFFERS - let skin_index = mesh[instance_index].previous_skin_index; + let skin_index = mesh[instance_index].current_skin_index; return weights.x * prev_joint_matrices[skin_index + indexes.x] + weights.y * prev_joint_matrices[skin_index + indexes.y] + weights.z * prev_joint_matrices[skin_index + indexes.z] diff --git a/crates/bevy_pbr/src/render/wireframe.wgsl b/crates/bevy_pbr/src/render/wireframe.wgsl index 981e5e1b1d..3873ffa3dd 100644 --- a/crates/bevy_pbr/src/render/wireframe.wgsl +++ b/crates/bevy_pbr/src/render/wireframe.wgsl @@ -1,12 +1,12 @@ #import bevy_pbr::forward_io::VertexOutput -struct WireframeMaterial { - color: vec4, -}; +struct PushConstants { + color: vec4 +} + +var push_constants: PushConstants; -@group(2) @binding(0) -var material: WireframeMaterial; @fragment fn fragment(in: VertexOutput) -> @location(0) vec4 { - return material.color; + return push_constants.color; } diff --git a/crates/bevy_pbr/src/ssao/mod.rs b/crates/bevy_pbr/src/ssao/mod.rs index 4d97f52cff..9098f82773 100644 --- a/crates/bevy_pbr/src/ssao/mod.rs +++ b/crates/bevy_pbr/src/ssao/mod.rs @@ -7,11 +7,11 @@ use bevy_core_pipeline::{ prepass::{DepthPrepass, NormalPrepass, ViewPrepassTextures}, }; use bevy_ecs::{ - prelude::{require, Component, Entity}, + prelude::{Component, Entity}, query::{Has, QueryItem, With}, reflect::ReflectComponent, resource::Resource, - schedule::IntoSystemConfigs, + 
schedule::IntoScheduleConfigs, system::{Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -146,12 +146,12 @@ impl Plugin for ScreenSpaceAmbientOcclusionPlugin { /// Requires that you add [`ScreenSpaceAmbientOcclusionPlugin`] to your app. /// /// It strongly recommended that you use SSAO in conjunction with -/// TAA ([`bevy_core_pipeline::experimental::taa::TemporalAntiAliasing`]). +/// TAA (`TemporalAntiAliasing`). /// Doing so greatly reduces SSAO noise. /// /// SSAO is not supported on `WebGL2`, and is not currently supported on `WebGPU`. #[derive(Component, ExtractComponent, Reflect, PartialEq, Clone, Debug)] -#[reflect(Component, Debug, Default, PartialEq)] +#[reflect(Component, Debug, Default, PartialEq, Clone)] #[require(DepthPrepass, NormalPrepass)] #[doc(alias = "Ssao")] pub struct ScreenSpaceAmbientOcclusion { @@ -174,6 +174,7 @@ impl Default for ScreenSpaceAmbientOcclusion { } #[derive(Reflect, PartialEq, Eq, Hash, Clone, Copy, Default, Debug)] +#[reflect(PartialEq, Hash, Clone, Default)] pub enum ScreenSpaceAmbientOcclusionQualityLevel { Low, Medium, diff --git a/crates/bevy_pbr/src/ssr/mod.rs b/crates/bevy_pbr/src/ssr/mod.rs index 15b783cef5..1ee73da8f0 100644 --- a/crates/bevy_pbr/src/ssr/mod.rs +++ b/crates/bevy_pbr/src/ssr/mod.rs @@ -12,12 +12,12 @@ use bevy_core_pipeline::{ }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - component::{require, Component}, + component::Component, entity::Entity, query::{Has, QueryItem, With}, reflect::ReflectComponent, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -81,7 +81,7 @@ pub struct ScreenSpaceReflectionsPlugin; /// bug whereby Naga doesn't generate correct GLSL when sampling depth buffers, /// which is required for screen-space raymarching. #[derive(Clone, Copy, Component, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require(DepthPrepass, DeferredPrepass)] #[doc(alias = "Ssr")] pub struct ScreenSpaceReflections { diff --git a/crates/bevy_pbr/src/ssr/raymarch.wgsl b/crates/bevy_pbr/src/ssr/raymarch.wgsl index c7e8495b5a..e149edfbbc 100644 --- a/crates/bevy_pbr/src/ssr/raymarch.wgsl +++ b/crates/bevy_pbr/src/ssr/raymarch.wgsl @@ -286,7 +286,7 @@ struct DepthRayMarchResult { /// Range: `0..=1` as a lerp factor over `ray_start_cs..=ray_end_cs`. hit_t: f32, - /// UV correspindong to `hit_t`. + /// UV corresponding to `hit_t`. hit_uv: vec2, /// The distance that the hit point penetrates into the hit surface. diff --git a/crates/bevy_pbr/src/volumetric_fog/mod.rs b/crates/bevy_pbr/src/volumetric_fog/mod.rs index 4b90d63afc..b9f1d60945 100644 --- a/crates/bevy_pbr/src/volumetric_fog/mod.rs +++ b/crates/bevy_pbr/src/volumetric_fog/mod.rs @@ -37,9 +37,7 @@ use bevy_core_pipeline::core_3d::{ prepare_core_3d_depth_textures, }; use bevy_ecs::{ - component::{require, Component}, - reflect::ReflectComponent, - schedule::IntoSystemConfigs as _, + component::Component, reflect::ReflectComponent, schedule::IntoScheduleConfigs as _, }; use bevy_image::Image; use bevy_math::{ @@ -73,14 +71,14 @@ pub struct VolumetricFogPlugin; /// /// This allows the light to generate light shafts/god rays. 
#[derive(Clone, Copy, Component, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct VolumetricLight; /// When placed on a [`bevy_core_pipeline::core_3d::Camera3d`], enables /// volumetric fog and volumetric lighting, also known as light shafts or god /// rays. #[derive(Clone, Copy, Component, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct VolumetricFog { /// Color of the ambient light. /// @@ -118,7 +116,7 @@ pub struct VolumetricFog { } #[derive(Clone, Component, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(Transform, Visibility)] pub struct FogVolume { /// The color of the fog. diff --git a/crates/bevy_pbr/src/volumetric_fog/render.rs b/crates/bevy_pbr/src/volumetric_fog/render.rs index 292ad1c86a..07012a72e2 100644 --- a/crates/bevy_pbr/src/volumetric_fog/render.rs +++ b/crates/bevy_pbr/src/volumetric_fog/render.rs @@ -628,7 +628,10 @@ pub fn prepare_volumetric_fog_pipelines( >, meshes: Res>, ) { - let plane_mesh = meshes.get(&PLANE_MESH).expect("Plane mesh not found!"); + let Some(plane_mesh) = meshes.get(&PLANE_MESH) else { + // There's an off chance that the mesh won't be prepared yet if `RenderAssetBytesPerFrame` limiting is in use. + return; + }; for ( entity, @@ -694,20 +697,20 @@ pub fn prepare_volumetric_fog_uniforms( render_queue: Res, mut local_from_world_matrices: Local>, ) { - let Some(mut writer) = volumetric_lighting_uniform_buffer.get_writer( - view_targets.iter().len(), - &render_device, - &render_queue, - ) else { - return; - }; - // Do this up front to avoid O(n^2) matrix inversion. local_from_world_matrices.clear(); for (_, _, fog_transform) in fog_volumes.iter() { local_from_world_matrices.push(fog_transform.compute_matrix().inverse()); } + let uniform_count = view_targets.iter().len() * local_from_world_matrices.len(); + + let Some(mut writer) = + volumetric_lighting_uniform_buffer.get_writer(uniform_count, &render_device, &render_queue) + else { + return; + }; + for (view_entity, extracted_view, volumetric_fog) in view_targets.iter() { let world_from_view = extracted_view.world_from_view.compute_matrix(); diff --git a/crates/bevy_pbr/src/wireframe.rs b/crates/bevy_pbr/src/wireframe.rs index 68862bbf71..407062064a 100644 --- a/crates/bevy_pbr/src/wireframe.rs +++ b/crates/bevy_pbr/src/wireframe.rs @@ -1,15 +1,60 @@ -use crate::{Material, MaterialPipeline, MaterialPipelineKey, MaterialPlugin, MeshMaterial3d}; -use bevy_app::{Plugin, Startup, Update}; -use bevy_asset::{load_internal_asset, weak_handle, Asset, AssetApp, Assets, Handle}; -use bevy_color::{Color, LinearRgba}; -use bevy_ecs::prelude::*; -use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -use bevy_render::{ - extract_resource::ExtractResource, - mesh::{Mesh3d, MeshVertexBufferLayoutRef}, - prelude::*, - render_resource::*, +use crate::{ + DrawMesh, MeshPipeline, MeshPipelineKey, RenderMeshInstanceFlags, RenderMeshInstances, + SetMeshBindGroup, SetMeshViewBindGroup, ViewKeyCache, ViewSpecializationTicks, }; +use bevy_app::{App, Plugin, PostUpdate, Startup, Update}; +use bevy_asset::{ + load_internal_asset, prelude::AssetChanged, weak_handle, AsAssetId, Asset, AssetApp, + AssetEvents, AssetId, Assets, Handle, UntypedAssetId, +}; +use bevy_color::{Color, ColorToComponents}; +use bevy_core_pipeline::core_3d::{ + graph::{Core3d, Node3d}, + Camera3d, +}; +use bevy_derive::{Deref, DerefMut}; +use 
bevy_ecs::{ + component::Tick, + prelude::*, + query::QueryItem, + system::{lifetimeless::SRes, SystemChangeTick, SystemParamItem}, +}; +use bevy_platform::{ + collections::{HashMap, HashSet}, + hash::FixedHasher, +}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; +use bevy_render::camera::extract_cameras; +use bevy_render::{ + batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport}, + camera::ExtractedCamera, + extract_resource::ExtractResource, + mesh::{ + allocator::{MeshAllocator, SlabId}, + Mesh3d, MeshVertexBufferLayoutRef, RenderMesh, + }, + prelude::*, + render_asset::{ + prepare_assets, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets, + }, + render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, + render_phase::{ + AddRenderCommand, BinnedPhaseItem, BinnedRenderPhasePlugin, BinnedRenderPhaseType, + CachedRenderPipelinePhaseItem, DrawFunctionId, DrawFunctions, PhaseItem, + PhaseItemBatchSetKey, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, + SetItemPipeline, TrackedRenderPass, ViewBinnedRenderPhases, + }, + render_resource::*, + renderer::RenderContext, + sync_world::{MainEntity, MainEntityHashMap}, + view::{ + ExtractedView, NoIndirectDrawing, RenderVisibilityRanges, RenderVisibleEntities, + RetainedViewEntity, ViewDepthTexture, ViewTarget, + }, + Extract, Render, RenderApp, RenderDebugFlags, RenderSet, +}; +use core::{hash::Hash, ops::Range}; +use tracing::error; pub const WIREFRAME_SHADER_HANDLE: Handle = weak_handle!("2646a633-f8e3-4380-87ae-b44d881abbce"); @@ -24,9 +69,20 @@ pub const WIREFRAME_SHADER_HANDLE: Handle = /// /// This is a native only feature. #[derive(Debug, Default)] -pub struct WireframePlugin; +pub struct WireframePlugin { + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} + +impl WireframePlugin { + /// Creates a new [`WireframePlugin`] with the given debug flags. + pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { debug_flags } + } +} + impl Plugin for WireframePlugin { - fn build(&self, app: &mut bevy_app::App) { + fn build(&self, app: &mut App) { load_internal_asset!( app, WIREFRAME_SHADER_HANDLE, @@ -34,25 +90,83 @@ impl Plugin for WireframePlugin { Shader::from_wgsl ); - app.register_type::() - .register_type::() - .register_type::() - .register_type::() - .init_resource::() - .add_plugins(MaterialPlugin::::default()) - .register_asset_reflect::() - .add_systems(Startup, setup_global_wireframe_material) - .add_systems( - Update, + app.add_plugins(( + BinnedRenderPhasePlugin::::new(self.debug_flags), + RenderAssetPlugin::::default(), + )) + .init_asset::() + .init_resource::>() + .register_type::() + .register_type::() + .register_type::() + .init_resource::() + .init_resource::() + .add_systems(Startup, setup_global_wireframe_material) + .add_systems( + Update, + ( + global_color_changed.run_if(resource_changed::), + wireframe_color_changed, + // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global + // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed. 
+ (apply_wireframe_material, apply_global_wireframe_material).chain(), + ), + ) + .add_systems( + PostUpdate, + check_wireframe_entities_needing_specialization + .after(AssetEvents) + .run_if(resource_exists::), + ); + + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + render_app + .init_resource::() + .init_resource::() + .init_resource::>() + .add_render_command::() + .init_resource::() + .init_resource::>() + .add_render_graph_node::>(Core3d, Node3d::Wireframe) + .add_render_graph_edges( + Core3d, ( - global_color_changed.run_if(resource_changed::), - wireframe_color_changed, - // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global - // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed. - (apply_wireframe_material, apply_global_wireframe_material).chain(), + Node3d::EndMainPass, + Node3d::Wireframe, + Node3d::PostProcessing, + ), + ) + .add_systems( + ExtractSchedule, + ( + extract_wireframe_3d_camera, + extract_wireframe_entities_needing_specialization.after(extract_cameras), + extract_wireframe_materials, + ), + ) + .add_systems( + Render, + ( + specialize_wireframes + .in_set(RenderSet::PrepareMeshes) + .after(prepare_assets::) + .after(prepare_assets::), + queue_wireframes + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::), ), ); } + + fn finish(&self, app: &mut App) { + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + render_app.init_resource::(); + } } /// Enables wireframe rendering for any entity it is attached to. @@ -63,21 +177,261 @@ impl Plugin for WireframePlugin { #[reflect(Component, Default, Debug, PartialEq)] pub struct Wireframe; +pub struct Wireframe3d { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: Wireframe3dBatchSetKey, + /// The key, which determines which can be batched. + pub bin_key: Wireframe3dBinKey, + /// An entity from which data will be fetched, including the mesh if + /// applicable. + pub representative_entity: (Entity, MainEntity), + /// The ranges of instances. + pub batch_range: Range, + /// An extra index, which is either a dynamic offset or an index in the + /// indirect parameters list. 
+ pub extra_index: PhaseItemExtraIndex, +} + +impl PhaseItem for Wireframe3d { + fn entity(&self) -> Entity { + self.representative_entity.0 + } + + fn main_entity(&self) -> MainEntity { + self.representative_entity.1 + } + + fn draw_function(&self) -> DrawFunctionId { + self.batch_set_key.draw_function + } + + fn batch_range(&self) -> &Range { + &self.batch_range + } + + fn batch_range_mut(&mut self) -> &mut Range { + &mut self.batch_range + } + + fn extra_index(&self) -> PhaseItemExtraIndex { + self.extra_index.clone() + } + + fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range, &mut PhaseItemExtraIndex) { + (&mut self.batch_range, &mut self.extra_index) + } +} + +impl CachedRenderPipelinePhaseItem for Wireframe3d { + fn cached_pipeline(&self) -> CachedRenderPipelineId { + self.batch_set_key.pipeline + } +} + +impl BinnedPhaseItem for Wireframe3d { + type BinKey = Wireframe3dBinKey; + type BatchSetKey = Wireframe3dBatchSetKey; + + fn new( + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, + representative_entity: (Entity, MainEntity), + batch_range: Range, + extra_index: PhaseItemExtraIndex, + ) -> Self { + Self { + batch_set_key, + bin_key, + representative_entity, + batch_range, + extra_index, + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe3dBatchSetKey { + /// The identifier of the render pipeline. + pub pipeline: CachedRenderPipelineId, + + /// The wireframe material asset ID. + pub asset_id: UntypedAssetId, + + /// The function used to draw. + pub draw_function: DrawFunctionId, + /// The ID of the slab of GPU memory that contains vertex data. + /// + /// For non-mesh items, you can fill this with 0 if your items can be + /// multi-drawn, or with a unique value if they can't. + pub vertex_slab: SlabId, + + /// The ID of the slab of GPU memory that contains index data, if present. + /// + /// For non-mesh items, you can safely fill this with `None`. + pub index_slab: Option, +} + +impl PhaseItemBatchSetKey for Wireframe3dBatchSetKey { + fn indexed(&self) -> bool { + self.index_slab.is_some() + } +} + +/// Data that must be identical in order to *batch* phase items together. +/// +/// Note that a *batch set* (if multi-draw is in use) contains multiple batches. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe3dBinKey { + /// The wireframe mesh asset ID. + pub asset_id: UntypedAssetId, +} + +pub struct SetWireframe3dPushConstants; + +impl RenderCommand
for SetWireframe3dPushConstants { + type Param = ( + SRes, + SRes>, + ); + type ViewQuery = (); + type ItemQuery = (); + + #[inline] + fn render<'w>( + item: &P, + _view: (), + _item_query: Option<()>, + (wireframe_instances, wireframe_assets): SystemParamItem<'w, '_, Self::Param>, + pass: &mut TrackedRenderPass<'w>, + ) -> RenderCommandResult { + let Some(wireframe_material) = wireframe_instances.get(&item.main_entity()) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + let Some(wireframe_material) = wireframe_assets.get(*wireframe_material) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + + pass.set_push_constants( + ShaderStages::FRAGMENT, + 0, + bytemuck::bytes_of(&wireframe_material.color), + ); + RenderCommandResult::Success + } +} + +pub type DrawWireframe3d = ( + SetItemPipeline, + SetMeshViewBindGroup<0>, + SetMeshBindGroup<1>, + SetWireframe3dPushConstants, + DrawMesh, +); + +#[derive(Resource, Clone)] +pub struct Wireframe3dPipeline { + mesh_pipeline: MeshPipeline, + shader: Handle, +} + +impl FromWorld for Wireframe3dPipeline { + fn from_world(render_world: &mut World) -> Self { + Wireframe3dPipeline { + mesh_pipeline: render_world.resource::().clone(), + shader: WIREFRAME_SHADER_HANDLE, + } + } +} + +impl SpecializedMeshPipeline for Wireframe3dPipeline { + type Key = MeshPipelineKey; + + fn specialize( + &self, + key: Self::Key, + layout: &MeshVertexBufferLayoutRef, + ) -> Result { + let mut descriptor = self.mesh_pipeline.specialize(key, layout)?; + descriptor.label = Some("wireframe_3d_pipeline".into()); + descriptor.push_constant_ranges.push(PushConstantRange { + stages: ShaderStages::FRAGMENT, + range: 0..16, + }); + let fragment = descriptor.fragment.as_mut().unwrap(); + fragment.shader = self.shader.clone(); + descriptor.primitive.polygon_mode = PolygonMode::Line; + descriptor.depth_stencil.as_mut().unwrap().bias.slope_scale = 1.0; + Ok(descriptor) + } +} + +#[derive(Default)] +struct Wireframe3dNode; +impl ViewNode for Wireframe3dNode { + type ViewQuery = ( + &'static ExtractedCamera, + &'static ExtractedView, + &'static ViewTarget, + &'static ViewDepthTexture, + ); + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + (camera, view, target, depth): QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let Some(wireframe_phase) = world.get_resource::>() + else { + return Ok(()); + }; + + let Some(wireframe_phase) = wireframe_phase.get(&view.retained_view_entity) else { + return Ok(()); + }; + + let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { + label: Some("wireframe_3d_pass"), + color_attachments: &[Some(target.get_color_attachment())], + depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)), + timestamp_writes: None, + occlusion_query_set: None, + }); + + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); + } + + if let Err(err) = wireframe_phase.render(&mut render_pass, world, graph.view_entity()) { + error!("Error encountered while rendering the stencil phase {err:?}"); + return Err(NodeRunError::DrawError(err)); + } + + Ok(()) + } +} + /// Sets the color of the [`Wireframe`] of the entity it is attached to. /// /// If this component is present but there's no [`Wireframe`] component, /// it will still affect the color of the wireframe when [`WireframeConfig::global`] is set to true. 
/// /// This overrides the [`WireframeConfig::default_color`]. -// TODO: consider caching materials based on this color. -// This could blow up in size if people use random colored wireframes for each mesh. -// It will also be important to remove unused materials from the cache. #[derive(Component, Debug, Clone, Default, Reflect)] #[reflect(Component, Default, Debug)] pub struct WireframeColor { pub color: Color, } +#[derive(Component, Debug, Clone, Default)] +pub struct ExtractedWireframeColor { + pub color: [f32; 4], +} + /// Disables wireframe rendering for any entity it is attached to. /// It will ignore the [`WireframeConfig`] global setting. /// @@ -98,12 +452,112 @@ pub struct WireframeConfig { pub default_color: Color, } +#[derive(Asset, Reflect, Clone, Debug, Default)] +#[reflect(Clone, Default)] +pub struct WireframeMaterial { + pub color: Color, +} + +pub struct RenderWireframeMaterial { + pub color: [f32; 4], +} + +#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)] +#[reflect(Component, Default, Clone, PartialEq)] +pub struct Mesh3dWireframe(pub Handle); + +impl AsAssetId for Mesh3dWireframe { + type Asset = WireframeMaterial; + + fn as_asset_id(&self) -> AssetId { + self.0.id() + } +} + +impl RenderAsset for RenderWireframeMaterial { + type SourceAsset = WireframeMaterial; + type Param = (); + + fn prepare_asset( + source_asset: Self::SourceAsset, + _asset_id: AssetId, + _param: &mut SystemParamItem, + ) -> Result> { + Ok(RenderWireframeMaterial { + color: source_asset.color.to_linear().to_f32_array(), + }) + } +} + +#[derive(Resource, Deref, DerefMut, Default)] +pub struct RenderWireframeInstances(MainEntityHashMap>); + +#[derive(Clone, Resource, Deref, DerefMut, Debug, Default)] +pub struct WireframeEntitiesNeedingSpecialization { + #[deref] + pub entities: Vec, +} + +#[derive(Resource, Deref, DerefMut, Clone, Debug, Default)] +pub struct WireframeEntitySpecializationTicks { + pub entities: MainEntityHashMap, +} + +/// Stores the [`SpecializedWireframeViewPipelineCache`] for each view. +#[derive(Resource, Deref, DerefMut, Default)] +pub struct SpecializedWireframePipelineCache { + // view entity -> view pipeline cache + #[deref] + map: HashMap, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. +#[derive(Deref, DerefMut, Default)] +pub struct SpecializedWireframeViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, +} + #[derive(Resource)] struct GlobalWireframeMaterial { // This handle will be reused when the global config is enabled handle: Handle, } +pub fn extract_wireframe_materials( + mut material_instances: ResMut, + changed_meshes_query: Extract< + Query< + (Entity, &ViewVisibility, &Mesh3dWireframe), + Or<(Changed, Changed)>, + >, + >, + mut removed_visibilities_query: Extract>, + mut removed_materials_query: Extract>, +) { + for (entity, view_visibility, material) in &changed_meshes_query { + if view_visibility.get() { + material_instances.insert(entity.into(), material.id()); + } else { + material_instances.remove(&MainEntity::from(entity)); + } + } + + for entity in removed_visibilities_query + .read() + .chain(removed_materials_query.read()) + { + // Only queue a mesh for removal if we didn't pick it up above. + // It's possible that a necessary component was removed and re-added in + // the same frame. 
+ if !changed_meshes_query.contains(entity) { + material_instances.remove(&MainEntity::from(entity)); + } + } +} + fn setup_global_wireframe_material( mut commands: Commands, mut materials: ResMut>, @@ -112,7 +566,7 @@ fn setup_global_wireframe_material( // Create the handle used for the global material commands.insert_resource(GlobalWireframeMaterial { handle: materials.add(WireframeMaterial { - color: config.default_color.into(), + color: config.default_color, }), }); } @@ -124,7 +578,7 @@ fn global_color_changed( global_material: Res, ) { if let Some(global_material) = materials.get_mut(&global_material.handle) { - global_material.color = config.default_color.into(); + global_material.color = config.default_color; } } @@ -132,13 +586,13 @@ fn global_color_changed( fn wireframe_color_changed( mut materials: ResMut>, mut colors_changed: Query< - (&mut MeshMaterial3d, &WireframeColor), + (&mut Mesh3dWireframe, &WireframeColor), (With, Changed), >, ) { for (mut handle, wireframe_color) in &mut colors_changed { handle.0 = materials.add(WireframeMaterial { - color: wireframe_color.color.into(), + color: wireframe_color.color, }); } } @@ -150,24 +604,24 @@ fn apply_wireframe_material( mut materials: ResMut>, wireframes: Query< (Entity, Option<&WireframeColor>), - (With, Without>), + (With, Without), >, - no_wireframes: Query, With>)>, + no_wireframes: Query, With)>, mut removed_wireframes: RemovedComponents, global_material: Res, ) { for e in removed_wireframes.read().chain(no_wireframes.iter()) { - if let Some(mut commands) = commands.get_entity(e) { - commands.remove::>(); + if let Ok(mut commands) = commands.get_entity(e) { + commands.remove::(); } } let mut material_to_spawn = vec![]; for (e, maybe_color) in &wireframes { let material = get_wireframe_material(maybe_color, &mut materials, &global_material); - material_to_spawn.push((e, MeshMaterial3d(material))); + material_to_spawn.push((e, Mesh3dWireframe(material))); } - commands.insert_or_spawn_batch(material_to_spawn); + commands.try_insert_batch(material_to_spawn); } type WireframeFilter = (With, Without, Without); @@ -178,12 +632,9 @@ fn apply_global_wireframe_material( config: Res, meshes_without_material: Query< (Entity, Option<&WireframeColor>), - (WireframeFilter, Without>), - >, - meshes_with_global_material: Query< - Entity, - (WireframeFilter, With>), + (WireframeFilter, Without), >, + meshes_with_global_material: Query)>, global_material: Res, mut materials: ResMut>, ) { @@ -193,14 +644,12 @@ fn apply_global_wireframe_material( let material = get_wireframe_material(maybe_color, &mut materials, &global_material); // We only add the material handle but not the Wireframe component // This makes it easy to detect which mesh is using the global material and which ones are user specified - material_to_spawn.push((e, MeshMaterial3d(material))); + material_to_spawn.push((e, Mesh3dWireframe(material))); } - commands.insert_or_spawn_batch(material_to_spawn); + commands.try_insert_batch(material_to_spawn); } else { for e in &meshes_with_global_material { - commands - .entity(e) - .remove::>(); + commands.entity(e).remove::(); } } } @@ -213,7 +662,7 @@ fn get_wireframe_material( ) -> Handle { if let Some(wireframe_color) = maybe_color { wireframe_materials.add(WireframeMaterial { - color: wireframe_color.color.into(), + color: wireframe_color.color, }) } else { // If there's no color specified we can use the global material since it's already set to use the default_color @@ -221,27 +670,241 @@ fn get_wireframe_material( } } 
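Because this file reworks the public wireframe API (a plain `WireframeMaterial` asset, a `Mesh3dWireframe` handle component, and a `WireframePlugin` that carries `RenderDebugFlags`), a short usage sketch may help reviewers. This is illustrative only: the `bevy::` re-export paths and the scene contents are assumptions, and only `Wireframe`, `WireframeColor`, `WireframeConfig`, and `WireframePlugin` come from this patch.

    use bevy::pbr::wireframe::{Wireframe, WireframeColor, WireframeConfig, WireframePlugin};
    use bevy::prelude::*;

    fn main() {
        App::new()
            // `WireframePlugin` still derives `Default`; `WireframePlugin::new` can be
            // used instead to pass explicit `RenderDebugFlags`.
            .add_plugins((DefaultPlugins, WireframePlugin::default()))
            // Draw wireframes on every mesh unless an entity opts out.
            .insert_resource(WireframeConfig {
                global: true,
                default_color: Color::WHITE,
            })
            .add_systems(Startup, spawn_cube)
            .run();
    }

    fn spawn_cube(
        mut commands: Commands,
        mut meshes: ResMut<Assets<Mesh>>,
        mut materials: ResMut<Assets<StandardMaterial>>,
    ) {
        commands.spawn((
            Mesh3d(meshes.add(Cuboid::default())),
            MeshMaterial3d(materials.add(Color::srgb(0.3, 0.5, 0.8))),
            // Per-entity opt-in plus a color override; `apply_wireframe_material`
            // converts these into a `Mesh3dWireframe` handle during `Update`.
            Wireframe,
            WireframeColor { color: Color::BLACK },
        ));
    }

The per-material color no longer travels through a bind group; it is written as a fragment push constant, matching the `wireframe.wgsl` change earlier in this diff.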
-#[derive(Default, AsBindGroup, Debug, Clone, Asset, Reflect)] -pub struct WireframeMaterial { - #[uniform(0)] - pub color: LinearRgba, -} - -impl Material for WireframeMaterial { - fn fragment_shader() -> ShaderRef { - WIREFRAME_SHADER_HANDLE.into() - } - - fn specialize( - _pipeline: &MaterialPipeline, - descriptor: &mut RenderPipelineDescriptor, - _layout: &MeshVertexBufferLayoutRef, - _key: MaterialPipelineKey, - ) -> Result<(), SpecializedMeshPipelineError> { - descriptor.primitive.polygon_mode = PolygonMode::Line; - if let Some(depth_stencil) = descriptor.depth_stencil.as_mut() { - depth_stencil.bias.slope_scale = 1.0; +fn extract_wireframe_3d_camera( + mut wireframe_3d_phases: ResMut>, + cameras: Extract), With>>, + mut live_entities: Local>, + gpu_preprocessing_support: Res, +) { + live_entities.clear(); + for (main_entity, camera, no_indirect_drawing) in &cameras { + if !camera.is_active { + continue; + } + let gpu_preprocessing_mode = gpu_preprocessing_support.min(if !no_indirect_drawing { + GpuPreprocessingMode::Culling + } else { + GpuPreprocessingMode::PreprocessingOnly + }); + + let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0); + wireframe_3d_phases.prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + live_entities.insert(retained_view_entity); + } + + // Clear out all dead views. + wireframe_3d_phases.retain(|camera_entity, _| live_entities.contains(camera_entity)); +} + +pub fn extract_wireframe_entities_needing_specialization( + entities_needing_specialization: Extract>, + mut entity_specialization_ticks: ResMut, + views: Query<&ExtractedView>, + mut specialized_wireframe_pipeline_cache: ResMut, + mut removed_meshes_query: Extract>, + ticks: SystemChangeTick, +) { + for entity in entities_needing_specialization.iter() { + // Update the entity's specialization tick with this run's tick + entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); + } + + for entity in removed_meshes_query.read() { + for view in &views { + if let Some(specialized_wireframe_pipeline_cache) = + specialized_wireframe_pipeline_cache.get_mut(&view.retained_view_entity) + { + specialized_wireframe_pipeline_cache.remove(&MainEntity::from(entity)); + } + } + } +} + +pub fn check_wireframe_entities_needing_specialization( + needs_specialization: Query< + Entity, + Or<( + Changed, + AssetChanged, + Changed, + AssetChanged, + )>, + >, + mut entities_needing_specialization: ResMut, +) { + entities_needing_specialization.clear(); + for entity in &needs_specialization { + entities_needing_specialization.push(entity); + } +} + +pub fn specialize_wireframes( + render_meshes: Res>, + render_mesh_instances: Res, + render_wireframe_instances: Res, + render_visibility_ranges: Res, + wireframe_phases: Res>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, + view_key_cache: Res, + entity_specialization_ticks: Res, + view_specialization_ticks: Res, + mut specialized_material_pipeline_cache: ResMut, + mut pipelines: ResMut>, + pipeline: Res, + pipeline_cache: Res, + ticks: SystemChangeTick, +) { + // Record the retained IDs of all views so that we can expire old + // pipeline IDs. 
+ let mut all_views: HashSet = HashSet::default(); + + for (view, visible_entities) in &views { + all_views.insert(view.retained_view_entity); + + if !wireframe_phases.contains_key(&view.retained_view_entity) { + continue; + } + + let Some(view_key) = view_key_cache.get(&view.retained_view_entity) else { + continue; + }; + + let view_tick = view_specialization_ticks + .get(&view.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(view.retained_view_entity) + .or_default(); + + for (_, visible_entity) in visible_entities.iter::() { + if !render_wireframe_instances.contains_key(visible_entity) { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; + let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } + let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { + continue; + }; + + let mut mesh_key = *view_key; + mesh_key |= MeshPipelineKey::from_primitive_topology(mesh.primitive_topology()); + + if render_visibility_ranges.entity_has_crossfading_visibility_ranges(*visible_entity) { + mesh_key |= MeshPipelineKey::VISIBILITY_RANGE_DITHER; + } + + if view_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { + // If the previous frame have skins or morph targets, note that. + if mesh_instance + .flags + .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_SKIN) + { + mesh_key |= MeshPipelineKey::HAS_PREVIOUS_SKIN; + } + if mesh_instance + .flags + .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_MORPH) + { + mesh_key |= MeshPipelineKey::HAS_PREVIOUS_MORPH; + } + } + + let pipeline_id = + pipelines.specialize(&pipeline_cache, &pipeline, mesh_key, &mesh.layout); + let pipeline_id = match pipeline_id { + Ok(id) => id, + Err(err) => { + error!("{}", err); + continue; + } + }; + + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); + } + } + + // Delete specialized pipelines belonging to views that have expired. 
+ specialized_material_pipeline_cache + .retain(|retained_view_entity, _| all_views.contains(retained_view_entity)); +} + +fn queue_wireframes( + custom_draw_functions: Res>, + render_mesh_instances: Res, + gpu_preprocessing_support: Res, + mesh_allocator: Res, + specialized_wireframe_pipeline_cache: Res, + render_wireframe_instances: Res, + mut wireframe_3d_phases: ResMut>, + mut views: Query<(&ExtractedView, &RenderVisibleEntities)>, +) { + for (view, visible_entities) in &mut views { + let Some(wireframe_phase) = wireframe_3d_phases.get_mut(&view.retained_view_entity) else { + continue; + }; + let draw_wireframe = custom_draw_functions.read().id::(); + + let Some(view_specialized_material_pipeline_cache) = + specialized_wireframe_pipeline_cache.get(&view.retained_view_entity) + else { + continue; + }; + + for (render_entity, visible_entity) in visible_entities.iter::() { + let Some(wireframe_instance) = render_wireframe_instances.get(visible_entity) else { + continue; + }; + let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. + if wireframe_phase.validate_cached_entity(*visible_entity, current_change_tick) { + continue; + } + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; + let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + let bin_key = Wireframe3dBinKey { + asset_id: mesh_instance.mesh_asset_id.untyped(), + }; + let batch_set_key = Wireframe3dBatchSetKey { + pipeline: pipeline_id, + asset_id: wireframe_instance.untyped(), + draw_function: draw_wireframe, + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; + wireframe_phase.add( + batch_set_key, + bin_key, + (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + current_change_tick, + ); } - Ok(()) } } diff --git a/crates/bevy_picking/Cargo.toml b/crates/bevy_picking/Cargo.toml index 70d470ffc1..f02e5237aa 100644 --- a/crates/bevy_picking/Cargo.toml +++ b/crates/bevy_picking/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_picking" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides screen picking functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -26,7 +26,7 @@ bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", ] } @@ -36,6 +36,7 @@ uuid = { version = "1.13.1", features = ["v4"] } tracing = { version = "0.1", default-features = false, features = ["std"] } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. 
uuid = { version = "1.13.1", default-features = false, features = ["js"] } [lints] diff --git a/crates/bevy_picking/src/backend.rs b/crates/bevy_picking/src/backend.rs index 597b238620..8c781d54e3 100644 --- a/crates/bevy_picking/src/backend.rs +++ b/crates/bevy_picking/src/backend.rs @@ -56,7 +56,7 @@ pub mod prelude { /// ambiguities with picking backends. Take care to ensure such systems are explicitly ordered /// against [`PickSet::Backend`](crate::PickSet::Backend), or better, avoid reading `PointerHits` in `PreUpdate`. #[derive(Event, Debug, Clone, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Clone)] pub struct PointerHits { /// The pointer associated with this hit test. pub pointer: prelude::PointerId, @@ -96,6 +96,7 @@ impl PointerHits { /// Holds data from a successful pointer hit test. See [`HitData::depth`] for important details. #[derive(Clone, Debug, PartialEq, Reflect)] +#[reflect(Clone, PartialEq)] pub struct HitData { /// The camera entity used to detect this hit. Useful when you need to find the ray that was /// casted for this hit when using a raycasting backend. @@ -105,7 +106,8 @@ pub struct HitData { /// distance from the pointer to the hit, measured from the near plane of the camera, to the /// point, in world space. pub depth: f32, - /// The position of the intersection in the world, if the data is available from the backend. + /// The position reported by the backend, if the data is available. Position data may be in any + /// space (e.g. World space, Screen space, Local space), specified by the backend providing it. pub position: Option, /// The normal vector of the hit test, if the data is available from the backend. pub normal: Option, @@ -129,7 +131,7 @@ pub mod ray { use crate::backend::prelude::{PointerId, PointerLocation}; use bevy_ecs::prelude::*; use bevy_math::Ray3d; - use bevy_platform_support::collections::{hash_map::Iter, HashMap}; + use bevy_platform::collections::{hash_map::Iter, HashMap}; use bevy_reflect::Reflect; use bevy_render::camera::Camera; use bevy_transform::prelude::GlobalTransform; @@ -138,6 +140,7 @@ pub mod ray { /// Identifies a ray constructed from some (pointer, camera) combination. A pointer can be over /// multiple cameras, which is why a single pointer may have multiple rays. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Reflect)] + #[reflect(Clone, PartialEq, Hash)] pub struct RayId { /// The camera whose projection was used to calculate the ray. pub camera: Entity, @@ -175,7 +178,10 @@ pub mod ray { /// ``` #[derive(Clone, Debug, Default, Resource)] pub struct RayMap { - map: HashMap, + /// Cartesian product of all pointers and all cameras + /// Add your rays here to support picking through indirections, + /// e.g. rendered-to-texture cameras + pub map: HashMap, } impl RayMap { @@ -184,11 +190,6 @@ pub mod ray { self.map.iter() } - /// The hash map of all rays cast in the current frame. - pub fn map(&self) -> &HashMap { - &self.map - } - /// Clears the [`RayMap`] and re-populates it with one ray for each /// combination of pointer entity and camera entity where the pointer /// intersects the camera's viewport. 
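// Illustrative sketch (editor's addition, not part of the diff): with `RayMap::map` now a
// public field and the `map()` accessor removed, a custom picking backend reads rays
// directly and reports hits. `cast_against_my_world` is a hypothetical stand-in for the
// backend's own intersection test.
use bevy_ecs::prelude::*;
use bevy_math::Ray3d;
use bevy_picking::backend::{ray::RayMap, HitData, PointerHits};

fn my_backend(ray_map: Res<RayMap>, mut output: EventWriter<PointerHits>) {
    for (&ray_id, &ray) in ray_map.iter() {
        let Some((entity, depth)) = cast_against_my_world(ray) else {
            continue;
        };
        // No position/normal reported here; backends may fill them in, in whatever
        // space they document.
        let hit = HitData::new(ray_id.camera, depth, None, None);
        // An order of 0.0 is arbitrary; real backends usually derive it from the camera.
        output.write(PointerHits::new(ray_id.pointer, vec![(entity, hit)], 0.0));
    }
}

fn cast_against_my_world(_ray: Ray3d) -> Option<(Entity, f32)> {
    None // placeholder for the backend's real geometry query
}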
diff --git a/crates/bevy_picking/src/events.rs b/crates/bevy_picking/src/events.rs index 0660300147..88b3b9bccc 100644 --- a/crates/bevy_picking/src/events.rs +++ b/crates/bevy_picking/src/events.rs @@ -40,9 +40,10 @@ use core::{fmt::Debug, time::Duration}; use bevy_ecs::{prelude::*, query::QueryData, system::SystemParam, traversal::Traversal}; +use bevy_input::mouse::MouseScrollUnit; use bevy_math::Vec2; -use bevy_platform_support::collections::HashMap; -use bevy_platform_support::time::Instant; +use bevy_platform::collections::HashMap; +use bevy_platform::time::Instant; use bevy_reflect::prelude::*; use bevy_render::camera::NormalizedRenderTarget; use bevy_window::Window; @@ -59,7 +60,7 @@ use crate::{ /// The documentation for the [`pointer_events`] explains the events this module exposes and /// the order in which they fire. #[derive(Clone, PartialEq, Debug, Reflect, Component)] -#[reflect(Component, Debug)] +#[reflect(Component, Debug, Clone)] pub struct Pointer { /// The original target of this picking event, before bubbling pub target: Entity, @@ -78,7 +79,7 @@ pub struct Pointer { /// propagates to the pointer's window and stops there. #[derive(QueryData)] pub struct PointerTraversal { - parent: Option<&'static ChildOf>, + child_of: Option<&'static ChildOf>, window: Option<&'static Window>, } @@ -87,11 +88,11 @@ where E: Debug + Clone + Reflect, { fn traverse(item: Self::Item<'_>, pointer: &Pointer) -> Option { - let PointerTraversalItem { parent, window } = item; + let PointerTraversalItem { child_of, window } = item; // Send event to parent, if it has one. - if let Some(parent) = parent { - return Some(parent.get()); + if let Some(child_of) = child_of { + return Some(child_of.parent()); }; // Otherwise, send it to the window entity (unless this is a window entity). @@ -145,20 +146,23 @@ impl Pointer { /// Fires when a pointer is canceled, and its current interaction state is dropped. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Cancel { /// Information about the picking intersection. pub hit: HitData, } -/// Fires when a the pointer crosses into the bounds of the `target` entity. +/// Fires when a pointer crosses into the bounds of the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Over { /// Information about the picking intersection. pub hit: HitData, } -/// Fires when a the pointer crosses out of the bounds of the `target` entity. +/// Fires when a pointer crosses out of the bounds of the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Out { /// Information about the latest prior picking intersection. pub hit: HitData, @@ -166,6 +170,7 @@ pub struct Out { /// Fires when a pointer button is pressed over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Pressed { /// Pointer button pressed to trigger this event. pub button: PointerButton, @@ -175,6 +180,7 @@ pub struct Pressed { /// Fires when a pointer button is released over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Released { /// Pointer button lifted to trigger this event. pub button: PointerButton, @@ -185,6 +191,7 @@ pub struct Released { /// Fires when a pointer sends a pointer pressed event followed by a pointer released event, with the same /// `target` entity for both events. 
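// Illustrative sketch (editor's addition, not part of the diff): the traversal above now
// reads the parent through `ChildOf::parent()` instead of the old `.get()`. The same
// accessor walks any hierarchy, assuming `ChildOf` is re-exported from the ECS prelude
// as it is here.
use bevy_ecs::prelude::*;

/// Follows `ChildOf` links upward until an entity without a parent is reached.
fn root_of(entity: Entity, parents: &Query<&ChildOf>) -> Entity {
    let mut current = entity;
    while let Ok(child_of) = parents.get(current) {
        current = child_of.parent();
    }
    current
}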
#[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Click { /// Pointer button pressed and lifted to trigger this event. pub button: PointerButton, @@ -196,6 +203,7 @@ pub struct Click { /// Fires while a pointer is moving over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Move { /// Information about the picking intersection. pub hit: HitData, @@ -205,6 +213,7 @@ pub struct Move { /// Fires when the `target` entity receives a pointer pressed event followed by a pointer move event. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragStart { /// Pointer button pressed and moved to trigger this event. pub button: PointerButton, @@ -214,6 +223,7 @@ pub struct DragStart { /// Fires while the `target` entity is being dragged. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Drag { /// Pointer button pressed and moved to trigger this event. pub button: PointerButton, @@ -225,6 +235,7 @@ pub struct Drag { /// Fires when a pointer is dragging the `target` entity and a pointer released event is received. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragEnd { /// Pointer button pressed, moved, and released to trigger this event. pub button: PointerButton, @@ -234,6 +245,7 @@ pub struct DragEnd { /// Fires when a pointer dragging the `dragged` entity enters the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragEnter { /// Pointer button pressed to enter drag. pub button: PointerButton, @@ -245,6 +257,7 @@ pub struct DragEnter { /// Fires while the `dragged` entity is being dragged over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragOver { /// Pointer button pressed while dragging over. pub button: PointerButton, @@ -256,6 +269,7 @@ pub struct DragOver { /// Fires when a pointer dragging the `dragged` entity leaves the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragLeave { /// Pointer button pressed while leaving drag. pub button: PointerButton, @@ -267,6 +281,7 @@ pub struct DragLeave { /// Fires when a pointer drops the `dropped` entity onto the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragDrop { /// Pointer button released to drop. pub button: PointerButton, @@ -277,7 +292,8 @@ pub struct DragDrop { } /// Dragging state. -#[derive(Debug, Clone)] +#[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragEntry { /// The position of the pointer at drag start. pub start_pos: Vec2, @@ -285,6 +301,20 @@ pub struct DragEntry { pub latest_pos: Vec2, } +/// Fires while a pointer is scrolling over the `target` entity. +#[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] +pub struct Scroll { + /// The mouse scroll unit. + pub unit: MouseScrollUnit, + /// The horizontal scroll value. + pub x: f32, + /// The vertical scroll value. + pub y: f32, + /// Information about the picking intersection. + pub hit: HitData, +} + /// An entry in the cache that drives the `pointer_events` system, storing additional data /// about pointer button presses. 
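// Illustrative sketch (editor's addition, not part of the diff): one possible consumer of
// the new `Scroll` event is an observer attached to an entity. This assumes `Pointer<E>`
// still dereferences to its inner event type.
use bevy_ecs::prelude::*;
use bevy_picking::events::{Pointer, Scroll};

fn attach_scroll_observer(commands: &mut Commands, target: Entity) {
    commands
        .entity(target)
        .observe(|trigger: Trigger<Pointer<Scroll>>| {
            let scroll = trigger.event();
            // `y` is the vertical scroll value; `unit` says whether it is in lines or pixels.
            println!(
                "scrolled {} ({:?}) over {}",
                scroll.y,
                scroll.unit,
                trigger.target()
            );
        });
}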
#[derive(Debug, Clone, Default)] @@ -346,6 +376,7 @@ pub struct PickingEventWriters<'w> { drag_leave_events: EventWriter<'w, Pointer>, drag_over_events: EventWriter<'w, Pointer>, drag_start_events: EventWriter<'w, Pointer>, + scroll_events: EventWriter<'w, Pointer>, move_events: EventWriter<'w, Pointer>, out_events: EventWriter<'w, Pointer>, over_events: EventWriter<'w, Pointer>, @@ -449,7 +480,7 @@ pub fn pointer_events( Out { hit: hit.clone() }, ); commands.trigger_targets(out_event.clone(), hovered_entity); - event_writers.out_events.send(out_event); + event_writers.out_events.write(out_event); // Possibly send DragLeave events for button in PointerButton::iter() { @@ -467,7 +498,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_leave_event.clone(), hovered_entity); - event_writers.drag_leave_events.send(drag_leave_event); + event_writers.drag_leave_events.write(drag_leave_event); } } } @@ -513,7 +544,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_enter_event.clone(), hovered_entity); - event_writers.drag_enter_events.send(drag_enter_event); + event_writers.drag_enter_events.write(drag_enter_event); } } @@ -525,7 +556,7 @@ pub fn pointer_events( Over { hit: hit.clone() }, ); commands.trigger_targets(over_event.clone(), hovered_entity); - event_writers.over_events.send(over_event); + event_writers.over_events.write(over_event); } } @@ -556,7 +587,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(pressed_event.clone(), hovered_entity); - event_writers.pressed_events.send(pressed_event); + event_writers.pressed_events.write(pressed_event); // Also insert the press into the state state .pressing @@ -585,7 +616,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(click_event.clone(), hovered_entity); - event_writers.click_events.send(click_event); + event_writers.click_events.write(click_event); } // Always send the Released event let released_event = Pointer::new( @@ -598,7 +629,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(released_event.clone(), hovered_entity); - event_writers.released_events.send(released_event); + event_writers.released_events.write(released_event); } // Then emit the drop events. 
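// Illustrative sketch (editor's addition, not part of the diff): the `send` -> `write`
// changes above follow the `EventWriter` rename; any system that emits events migrates
// the same way (the event type still has to be registered with `add_event`).
use bevy_ecs::prelude::*;

#[derive(Event)]
struct Greeting;

fn emit_greeting(mut events: EventWriter<Greeting>) {
    // Previously: events.send(Greeting);
    events.write(Greeting);
}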
@@ -616,7 +647,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_drop_event.clone(), *dragged_over); - event_writers.drag_drop_events.send(drag_drop_event); + event_writers.drag_drop_events.write(drag_drop_event); } // Emit DragEnd let drag_end_event = Pointer::new( @@ -629,7 +660,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_end_event.clone(), drag_target); - event_writers.drag_end_events.send(drag_end_event); + event_writers.drag_end_events.write(drag_end_event); // Emit DragLeave for (dragged_over, hit) in state.dragging_over.iter() { let drag_leave_event = Pointer::new( @@ -643,7 +674,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_leave_event.clone(), *dragged_over); - event_writers.drag_leave_events.send(drag_leave_event); + event_writers.drag_leave_events.write(drag_leave_event); } } @@ -683,7 +714,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_start_event.clone(), *press_target); - event_writers.drag_start_events.send(drag_start_event); + event_writers.drag_start_events.write(drag_start_event); } // Emit Drag events to the entities we are dragging @@ -703,7 +734,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_event.clone(), *drag_target); - event_writers.drag_events.send(drag_event); + event_writers.drag_events.write(drag_event); // Update drag position drag.latest_pos = location.position; @@ -726,7 +757,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_over_event.clone(), hovered_entity); - event_writers.drag_over_events.send(drag_over_event); + event_writers.drag_over_events.write(drag_over_event); } } } @@ -747,7 +778,29 @@ pub fn pointer_events( }, ); commands.trigger_targets(move_event.clone(), hovered_entity); - event_writers.move_events.send(move_event); + event_writers.move_events.write(move_event); + } + } + PointerAction::Scroll { x, y, unit } => { + for (hovered_entity, hit) in hover_map + .get(&pointer_id) + .iter() + .flat_map(|h| h.iter().map(|(entity, data)| (*entity, data.clone()))) + { + // Emit Scroll events to the entities we are hovering + let scroll_event = Pointer::new( + pointer_id, + location.clone(), + hovered_entity, + Scroll { + unit, + x, + y, + hit: hit.clone(), + }, + ); + commands.trigger_targets(scroll_event.clone(), hovered_entity); + event_writers.scroll_events.write(scroll_event); } } // Canceled @@ -761,7 +814,7 @@ pub fn pointer_events( let cancel_event = Pointer::new(pointer_id, location.clone(), hovered_entity, Cancel { hit }); commands.trigger_targets(cancel_event.clone(), hovered_entity); - event_writers.cancel_events.send(cancel_event); + event_writers.cancel_events.write(cancel_event); } // Clear the state for the canceled pointer pointer_state.clear(pointer_id); diff --git a/crates/bevy_picking/src/hover.rs b/crates/bevy_picking/src/hover.rs index 69edb6d9aa..6347568c02 100644 --- a/crates/bevy_picking/src/hover.rs +++ b/crates/bevy_picking/src/hover.rs @@ -16,7 +16,7 @@ use crate::{ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::prelude::*; use bevy_math::FloatOrd; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::prelude::*; type DepthSortedHits = Vec<(Entity, HitData)>; @@ -131,9 +131,7 @@ fn build_over_map( .filter(|e| !cancelled_pointers.contains(&e.pointer)) { let pointer = entities_under_pointer.pointer; - let layer_map = pointer_over_map - .entry(pointer) - .or_insert_with(BTreeMap::new); + let layer_map = pointer_over_map.entry(pointer).or_default(); for (entity, pick_data) 
in entities_under_pointer.picks.iter() { let layer = entities_under_pointer.order; let hits = layer_map.entry(FloatOrd(layer)).or_default(); @@ -189,7 +187,7 @@ fn build_hover_map( /// the entity will be considered pressed. If that entity is instead being hovered by both pointers, /// it will be considered hovered. #[derive(Component, Copy, Clone, Default, Eq, PartialEq, Debug, Reflect)] -#[reflect(Component, Default, PartialEq, Debug)] +#[reflect(Component, Default, PartialEq, Debug, Clone)] pub enum PickingInteraction { /// The entity is being pressed down by a pointer. Pressed = 2, @@ -244,7 +242,7 @@ pub fn update_interactions( for (hovered_entity, new_interaction) in new_interaction_state.drain() { if let Ok(mut interaction) = interact.get_mut(hovered_entity) { *interaction = new_interaction; - } else if let Some(mut entity_commands) = commands.get_entity(hovered_entity) { + } else if let Ok(mut entity_commands) = commands.get_entity(hovered_entity) { entity_commands.try_insert(new_interaction); } } diff --git a/crates/bevy_picking/src/input.rs b/crates/bevy_picking/src/input.rs index a0b4ac0830..712e612224 100644 --- a/crates/bevy_picking/src/input.rs +++ b/crates/bevy_picking/src/input.rs @@ -14,12 +14,13 @@ use bevy_app::prelude::*; use bevy_ecs::prelude::*; use bevy_input::{ + mouse::MouseWheel, prelude::*, touch::{TouchInput, TouchPhase}, ButtonState, }; use bevy_math::Vec2; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_reflect::prelude::*; use bevy_render::camera::RenderTarget; use bevy_window::{PrimaryWindow, WindowEvent, WindowRef}; @@ -47,7 +48,7 @@ pub mod prelude { /// This plugin contains several settings, and is added to the world as a resource after initialization. /// You can configure pointer input settings at runtime by accessing the resource. #[derive(Copy, Clone, Resource, Debug, Reflect)] -#[reflect(Resource, Default)] +#[reflect(Resource, Default, Clone)] pub struct PointerInputPlugin { /// Should touch inputs be updated? 
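// Illustrative sketch (editor's addition, not part of the diff): `Commands::get_entity`
// now returns a `Result` rather than an `Option` (hence the `Ok(..)` pattern above), so
// fallible insertion against a possibly-despawned entity reads like this. `Hovered` is a
// hypothetical marker component.
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Hovered;

fn mark_hovered(commands: &mut Commands, entity: Entity) {
    if let Ok(mut entity_commands) = commands.get_entity(entity) {
        // `try_insert` avoids panicking if the entity goes away before the command applies.
        entity_commands.try_insert(Hovered);
    }
}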
pub is_touch_enabled: bool, @@ -117,14 +118,14 @@ pub fn mouse_pick_events( WindowEvent::CursorMoved(event) => { let location = Location { target: match RenderTarget::Window(WindowRef::Entity(event.window)) - .normalize(primary_window.get_single().ok()) + .normalize(primary_window.single().ok()) { Some(target) => target, None => continue, }, position: event.position, }; - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( PointerId::Mouse, location, PointerAction::Move { @@ -137,7 +138,7 @@ pub fn mouse_pick_events( WindowEvent::MouseButtonInput(input) => { let location = Location { target: match RenderTarget::Window(WindowRef::Entity(input.window)) - .normalize(primary_window.get_single().ok()) + .normalize(primary_window.single().ok()) { Some(target) => target, None => continue, @@ -154,7 +155,24 @@ pub fn mouse_pick_events( ButtonState::Pressed => PointerAction::Press(button), ButtonState::Released => PointerAction::Release(button), }; - pointer_events.send(PointerInput::new(PointerId::Mouse, location, action)); + pointer_events.write(PointerInput::new(PointerId::Mouse, location, action)); + } + WindowEvent::MouseWheel(event) => { + let MouseWheel { unit, x, y, window } = *event; + + let location = Location { + target: match RenderTarget::Window(WindowRef::Entity(window)) + .normalize(primary_window.single().ok()) + { + Some(target) => target, + None => continue, + }, + position: *cursor_last, + }; + + let action = PointerAction::Scroll { x, y, unit }; + + pointer_events.write(PointerInput::new(PointerId::Mouse, location, action)); } _ => {} } @@ -177,7 +195,7 @@ pub fn touch_pick_events( let pointer = PointerId::Touch(touch.id); let location = Location { target: match RenderTarget::Window(WindowRef::Entity(touch.window)) - .normalize(primary_window.get_single().ok()) + .normalize(primary_window.single().ok()) { Some(target) => target, None => continue, @@ -189,7 +207,7 @@ pub fn touch_pick_events( debug!("Spawning pointer {:?}", pointer); commands.spawn((pointer, PointerLocation::new(location.clone()))); - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, PointerAction::Press(PointerButton::Primary), @@ -203,7 +221,7 @@ pub fn touch_pick_events( if last_touch == touch { continue; } - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, PointerAction::Move { @@ -214,7 +232,7 @@ pub fn touch_pick_events( touch_cache.insert(touch.id, *touch); } TouchPhase::Ended => { - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, PointerAction::Release(PointerButton::Primary), @@ -222,7 +240,7 @@ pub fn touch_pick_events( touch_cache.remove(&touch.id); } TouchPhase::Canceled => { - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, PointerAction::Cancel, diff --git a/crates/bevy_picking/src/lib.rs b/crates/bevy_picking/src/lib.rs index 9e825a3826..6afe86b0d6 100644 --- a/crates/bevy_picking/src/lib.rs +++ b/crates/bevy_picking/src/lib.rs @@ -7,7 +7,7 @@ //! allows you to express more complex interactions, like detecting when a touch input drags a UI //! element and drops it on a 3d mesh rendered to a different camera. //! -//! Pointer events bubble up the entity hieararchy and can be used with observers, allowing you to +//! Pointer events bubble up the entity hierarchy and can be used with observers, allowing you to //! 
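// Illustrative sketch (editor's addition, not part of the diff): the input systems above
// switch from `get_single()` to the now-fallible `Query::single()`; in user code the same
// rename looks like this.
use bevy_ecs::prelude::*;
use bevy_window::{PrimaryWindow, Window};

fn log_primary_window(windows: Query<&Window, With<PrimaryWindow>>) {
    // Previously: windows.get_single()
    if let Ok(window) = windows.single() {
        println!("primary window is {}x{}", window.width(), window.height());
    }
}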
succinctly express rich interaction behaviors by attaching pointer callbacks to entities: //! //! ```rust @@ -64,7 +64,7 @@ //! commands.entity(trigger.target()).despawn(); //! }) //! .observe(|trigger: Trigger>, mut events: EventWriter| { -//! events.send(Greeting); +//! events.write(Greeting); //! }); //! } //! ``` @@ -179,7 +179,7 @@ pub mod prelude { #[doc(hidden)] pub use crate::mesh_picking::{ ray_cast::{MeshRayCast, MeshRayCastSettings, RayCastBackfaces, RayCastVisibility}, - MeshPickingPlugin, MeshPickingSettings, RayCastPickable, + MeshPickingCamera, MeshPickingPlugin, MeshPickingSettings, }; #[doc(hidden)] pub use crate::{ @@ -195,7 +195,7 @@ pub mod prelude { /// /// See the documentation on the fields for more details. #[derive(Component, Debug, Clone, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct Pickable { /// Should this entity block entities below it from being picked? /// @@ -300,7 +300,7 @@ impl PluginGroup for DefaultPickingPlugins { /// This plugin contains several settings, and is added to the world as a resource after initialization. You /// can configure picking settings at runtime through the resource. #[derive(Copy, Clone, Debug, Resource, Reflect)] -#[reflect(Resource, Default, Debug)] +#[reflect(Resource, Default, Debug, Clone)] pub struct PickingPlugin { /// Enables and disables all picking features. pub is_enabled: bool, @@ -422,6 +422,7 @@ impl Plugin for InteractionPlugin { .add_event::>() .add_event::>() .add_event::>() + .add_event::>() .add_systems( PreUpdate, (generate_hovermap, update_interactions, pointer_events) diff --git a/crates/bevy_picking/src/mesh_picking/mod.rs b/crates/bevy_picking/src/mesh_picking/mod.rs index 57a49a2b0b..1e7e45bc2d 100644 --- a/crates/bevy_picking/src/mesh_picking/mod.rs +++ b/crates/bevy_picking/src/mesh_picking/mod.rs @@ -4,9 +4,15 @@ //! by adding [`Pickable::IGNORE`]. //! //! To make mesh picking entirely opt-in, set [`MeshPickingSettings::require_markers`] -//! to `true` and add a [`RayCastPickable`] component to the desired camera and target entities. +//! to `true` and add [`MeshPickingCamera`] and [`Pickable`] components to the desired camera and +//! target entities. //! //! To manually perform mesh ray casts independent of picking, use the [`MeshRayCast`] system parameter. +//! +//! ## Implementation Notes +//! +//! - The `position` reported in `HitData` is in world space. The `normal` is a vector pointing +//! away from the face, it is not guaranteed to be normalized for scaled meshes. pub mod ray_cast; @@ -21,12 +27,19 @@ use bevy_reflect::prelude::*; use bevy_render::{prelude::*, view::RenderLayers}; use ray_cast::{MeshRayCast, MeshRayCastSettings, RayCastVisibility, SimplifiedMesh}; +/// An optional component that marks cameras that should be used in the [`MeshPickingPlugin`]. +/// +/// Only needed if [`MeshPickingSettings::require_markers`] is set to `true`, and ignored otherwise. +#[derive(Debug, Clone, Default, Component, Reflect)] +#[reflect(Debug, Default, Component)] +pub struct MeshPickingCamera; + /// Runtime settings for the [`MeshPickingPlugin`]. #[derive(Resource, Reflect)] #[reflect(Resource, Default)] pub struct MeshPickingSettings { - /// When set to `true` ray casting will only happen between cameras and entities marked with - /// [`RayCastPickable`]. `false` by default. 
+ /// When set to `true` ray casting will only consider cameras marked with + /// [`MeshPickingCamera`] and entities marked with [`Pickable`]. `false` by default. /// /// This setting is provided to give you fine-grained control over which cameras and entities /// should be used by the mesh picking backend at runtime. @@ -49,12 +62,6 @@ impl Default for MeshPickingSettings { } } -/// An optional component that marks cameras and target entities that should be used in the [`MeshPickingPlugin`]. -/// Only needed if [`MeshPickingSettings::require_markers`] is set to `true`, and ignored otherwise. -#[derive(Debug, Clone, Default, Component, Reflect)] -#[reflect(Component, Default)] -pub struct RayCastPickable; - /// Adds the mesh picking backend to your app. #[derive(Clone, Default)] pub struct MeshPickingPlugin; @@ -62,7 +69,8 @@ pub struct MeshPickingPlugin; impl Plugin for MeshPickingPlugin { fn build(&self, app: &mut App) { app.init_resource::() - .register_type::<(RayCastPickable, MeshPickingSettings, SimplifiedMesh)>() + .register_type::() + .register_type::() .add_systems(PreUpdate, update_hits.in_set(PickSet::Backend)); } } @@ -71,18 +79,18 @@ impl Plugin for MeshPickingPlugin { pub fn update_hits( backend_settings: Res, ray_map: Res, - picking_cameras: Query<(&Camera, Option<&RayCastPickable>, Option<&RenderLayers>)>, + picking_cameras: Query<(&Camera, Has, Option<&RenderLayers>)>, pickables: Query<&Pickable>, - marked_targets: Query<&RayCastPickable>, + marked_targets: Query<&Pickable>, layers: Query<&RenderLayers>, mut ray_cast: MeshRayCast, mut output: EventWriter, ) { - for (&ray_id, &ray) in ray_map.map().iter() { - let Ok((camera, cam_pickable, cam_layers)) = picking_cameras.get(ray_id.camera) else { + for (&ray_id, &ray) in ray_map.iter() { + let Ok((camera, cam_can_pick, cam_layers)) = picking_cameras.get(ray_id.camera) else { continue; }; - if backend_settings.require_markers && cam_pickable.is_none() { + if backend_settings.require_markers && !cam_can_pick { continue; } @@ -123,7 +131,7 @@ pub fn update_hits( .collect::>(); let order = camera.order as f32; if !picks.is_empty() { - output.send(PointerHits::new(ray_id.pointer, picks, order)); + output.write(PointerHits::new(ray_id.pointer, picks, order)); } } } diff --git a/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs b/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs index d4ec97e1f3..9988a96e19 100644 --- a/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs +++ b/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs @@ -1,11 +1,12 @@ use bevy_math::{bounding::Aabb3d, Dir3, Mat4, Ray3d, Vec3, Vec3A}; +use bevy_mesh::{Indices, Mesh, PrimitiveTopology}; use bevy_reflect::Reflect; -use bevy_render::mesh::{Indices, Mesh, PrimitiveTopology}; use super::Backfaces; /// Hit data for an intersection between a ray and a mesh. #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct RayMeshHit { /// The point of intersection in world space. pub point: Vec3, @@ -66,160 +67,135 @@ pub fn ray_mesh_intersection + Clone + Copy>( indices: Option<&[I]>, backface_culling: Backfaces, ) -> Option { - // The ray cast can hit the same mesh many times, so we need to track which hit is - // closest to the camera, and record that. 
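// Illustrative sketch (editor's addition, not part of the diff): with `RayCastPickable`
// removed, opting in to marker-based mesh picking uses `MeshPickingCamera` on cameras and
// a plain `Pickable` on targets once `require_markers` is enabled.
use bevy_ecs::prelude::*;
use bevy_picking::{
    prelude::{MeshPickingCamera, MeshPickingSettings},
    Pickable,
};

fn enable_marker_based_picking(
    commands: &mut Commands,
    settings: &mut MeshPickingSettings,
    camera: Entity,
    target: Entity,
) {
    // Only marked cameras and entities participate in mesh picking.
    settings.require_markers = true;
    // The camera opts in with the new marker component...
    commands.entity(camera).insert(MeshPickingCamera);
    // ...and each target opts in with `Pickable`.
    commands.entity(target).insert(Pickable::default());
}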
- let mut closest_hit_distance = f32::MAX; - let mut closest_hit = None; - let world_to_mesh = mesh_transform.inverse(); - let mesh_space_ray = Ray3d::new( + let ray = Ray3d::new( world_to_mesh.transform_point3(ray.origin), Dir3::new(world_to_mesh.transform_vector3(*ray.direction)).ok()?, ); - if let Some(indices) = indices { + let closest_hit = if let Some(indices) = indices { // The index list must be a multiple of three. If not, the mesh is malformed and the raycast // result might be nonsensical. if indices.len() % 3 != 0 { return None; } - for triangle in indices.chunks_exact(3) { - let [a, b, c] = [ - triangle[0].try_into().ok()?, - triangle[1].try_into().ok()?, - triangle[2].try_into().ok()?, - ]; + indices + .chunks_exact(3) + .enumerate() + .fold( + (f32::MAX, None), + |(closest_distance, closest_hit), (tri_idx, triangle)| { + let [Ok(a), Ok(b), Ok(c)] = [ + triangle[0].try_into(), + triangle[1].try_into(), + triangle[2].try_into(), + ] else { + return (closest_distance, closest_hit); + }; - let triangle_index = Some(a); - let tri_vertex_positions = &[ - Vec3::from(positions[a]), - Vec3::from(positions[b]), - Vec3::from(positions[c]), - ]; - let tri_normals = vertex_normals.map(|normals| { - [ - Vec3::from(normals[a]), - Vec3::from(normals[b]), - Vec3::from(normals[c]), - ] - }); + let tri_vertices = match [positions.get(a), positions.get(b), positions.get(c)] + { + [Some(a), Some(b), Some(c)] => { + [Vec3::from(*a), Vec3::from(*b), Vec3::from(*c)] + } + _ => return (closest_distance, closest_hit), + }; - let Some(hit) = triangle_intersection( - tri_vertex_positions, - tri_normals.as_ref(), - closest_hit_distance, - &mesh_space_ray, - backface_culling, - ) else { - continue; - }; - - closest_hit = Some(RayMeshHit { - point: mesh_transform.transform_point3(hit.point), - normal: mesh_transform.transform_vector3(hit.normal), - barycentric_coords: hit.barycentric_coords, - distance: mesh_transform - .transform_vector3(mesh_space_ray.direction * hit.distance) - .length(), - triangle: hit.triangle.map(|tri| { - [ - mesh_transform.transform_point3(tri[0]), - mesh_transform.transform_point3(tri[1]), - mesh_transform.transform_point3(tri[2]), - ] - }), - triangle_index, - }); - closest_hit_distance = hit.distance; - } + match ray_triangle_intersection(&ray, &tri_vertices, backface_culling) { + Some(hit) if hit.distance >= 0. 
&& hit.distance < closest_distance => { + (hit.distance, Some((tri_idx, hit))) + } + _ => (closest_distance, closest_hit), + } + }, + ) + .1 } else { - for (i, triangle) in positions.chunks_exact(3).enumerate() { - let &[a, b, c] = triangle else { - continue; - }; - let triangle_index = Some(i); - let tri_vertex_positions = &[Vec3::from(a), Vec3::from(b), Vec3::from(c)]; - let tri_normals = vertex_normals.map(|normals| { - [ - Vec3::from(normals[i]), - Vec3::from(normals[i + 1]), - Vec3::from(normals[i + 2]), - ] - }); + positions + .chunks_exact(3) + .enumerate() + .fold( + (f32::MAX, None), + |(closest_distance, closest_hit), (tri_idx, triangle)| { + let tri_vertices = [ + Vec3::from(triangle[0]), + Vec3::from(triangle[1]), + Vec3::from(triangle[2]), + ]; - let Some(hit) = triangle_intersection( - tri_vertex_positions, - tri_normals.as_ref(), - closest_hit_distance, - &mesh_space_ray, - backface_culling, - ) else { - continue; - }; - - closest_hit = Some(RayMeshHit { - point: mesh_transform.transform_point3(hit.point), - normal: mesh_transform.transform_vector3(hit.normal), - barycentric_coords: hit.barycentric_coords, - distance: mesh_transform - .transform_vector3(mesh_space_ray.direction * hit.distance) - .length(), - triangle: hit.triangle.map(|tri| { - [ - mesh_transform.transform_point3(tri[0]), - mesh_transform.transform_point3(tri[1]), - mesh_transform.transform_point3(tri[2]), - ] - }), - triangle_index, - }); - closest_hit_distance = hit.distance; - } - } - - closest_hit -} - -fn triangle_intersection( - tri_vertices: &[Vec3; 3], - tri_normals: Option<&[Vec3; 3]>, - max_distance: f32, - ray: &Ray3d, - backface_culling: Backfaces, -) -> Option { - let hit = ray_triangle_intersection(ray, tri_vertices, backface_culling)?; - - if hit.distance < 0.0 || hit.distance > max_distance { - return None; + match ray_triangle_intersection(&ray, &tri_vertices, backface_culling) { + Some(hit) if hit.distance >= 0. 
&& hit.distance < closest_distance => { + (hit.distance, Some((tri_idx, hit))) + } + _ => (closest_distance, closest_hit), + } + }, + ) + .1 }; - let point = ray.get_point(hit.distance); - let u = hit.barycentric_coords.0; - let v = hit.barycentric_coords.1; - let w = 1.0 - u - v; - let barycentric = Vec3::new(u, v, w); + closest_hit.and_then(|(tri_idx, hit)| { + let [a, b, c] = match indices { + Some(indices) => { + let triangle = indices.get((tri_idx * 3)..(tri_idx * 3 + 3))?; - let normal = if let Some(normals) = tri_normals { - normals[1] * u + normals[2] * v + normals[0] * w - } else { - (tri_vertices[1] - tri_vertices[0]) - .cross(tri_vertices[2] - tri_vertices[0]) - .normalize() - }; + let [Ok(a), Ok(b), Ok(c)] = [ + triangle[0].try_into(), + triangle[1].try_into(), + triangle[2].try_into(), + ] else { + return None; + }; - Some(RayMeshHit { - point, - normal, - barycentric_coords: barycentric, - distance: hit.distance, - triangle: Some(*tri_vertices), - triangle_index: None, + [a, b, c] + } + None => [tri_idx * 3, tri_idx * 3 + 1, tri_idx * 3 + 2], + }; + + let tri_vertices = match [positions.get(a), positions.get(b), positions.get(c)] { + [Some(a), Some(b), Some(c)] => [Vec3::from(*a), Vec3::from(*b), Vec3::from(*c)], + _ => return None, + }; + + let tri_normals = vertex_normals.and_then(|normals| { + let [Some(a), Some(b), Some(c)] = [normals.get(a), normals.get(b), normals.get(c)] + else { + return None; + }; + Some([Vec3::from(*a), Vec3::from(*b), Vec3::from(*c)]) + }); + + let point = ray.get_point(hit.distance); + let u = hit.barycentric_coords.0; + let v = hit.barycentric_coords.1; + let w = 1.0 - u - v; + let barycentric = Vec3::new(u, v, w); + + let normal = if let Some(normals) = tri_normals { + normals[1] * u + normals[2] * v + normals[0] * w + } else { + (tri_vertices[1] - tri_vertices[0]) + .cross(tri_vertices[2] - tri_vertices[0]) + .normalize() + }; + + Some(RayMeshHit { + point: mesh_transform.transform_point3(point), + normal: mesh_transform.transform_vector3(normal), + barycentric_coords: barycentric, + distance: mesh_transform + .transform_vector3(ray.direction * hit.distance) + .length(), + triangle: Some(tri_vertices.map(|v| mesh_transform.transform_point3(v))), + triangle_index: Some(tri_idx), + }) }) } /// Takes a ray and triangle and computes the intersection. 
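// Illustrative sketch (editor's addition, not part of the diff): the refactored hit path
// interpolates vertex normals with the barycentric coordinates (u, v, w = 1 - u - v)
// returned by the ray/triangle test, weighting vertex b by u, c by v, and a by w, exactly
// as in the code above.
use bevy_math::Vec3;

fn interpolate_normal(normals: [Vec3; 3], u: f32, v: f32) -> Vec3 {
    let w = 1.0 - u - v;
    normals[1] * u + normals[2] * v + normals[0] * w
}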
+#[inline] fn ray_triangle_intersection( ray: &Ray3d, triangle: &[Vec3; 3], @@ -313,6 +289,7 @@ pub fn ray_aabb_intersection_3d(ray: Ray3d, aabb: &Aabb3d, model_to_world: &Mat4 #[cfg(test)] mod tests { use bevy_math::Vec3; + use bevy_transform::components::GlobalTransform; use super::*; @@ -336,4 +313,174 @@ mod tests { let result = ray_triangle_intersection(&ray, &triangle, Backfaces::Cull); assert!(result.is_none()); } + + #[test] + fn ray_mesh_intersection_simple() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = None; + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_indices() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = Some(&[0, 1, 2]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_indices_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = + Some(&[[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]]); + let indices: Option<&[u16]> = Some(&[0, 1, 2]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = + Some(&[[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]]); + let indices: Option<&[u16]> = None; + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_missing_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = Some(&[]); + let indices: Option<&[u16]> = None; + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_indices_missing_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = Some(&[]); + let indices: Option<&[u16]> = Some(&[0, 1, 2]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + 
+ #[test] + fn ray_mesh_intersection_not_enough_indices() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = Some(&[0]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_none()); + } + + #[test] + fn ray_mesh_intersection_bad_indices() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = Some(&[0, 1, 3]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_none()); + } } diff --git a/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs b/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs index ef6f187416..c1f465b96a 100644 --- a/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs +++ b/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs @@ -7,8 +7,8 @@ mod intersections; use bevy_derive::{Deref, DerefMut}; use bevy_math::{bounding::Aabb3d, Ray3d}; +use bevy_mesh::Mesh; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -use bevy_render::mesh::Mesh; use intersections::*; pub use intersections::{ray_aabb_intersection_3d, ray_mesh_intersection, RayMeshHit}; @@ -22,6 +22,7 @@ use tracing::*; /// How a ray cast should handle [`Visibility`]. #[derive(Clone, Copy, Reflect)] +#[reflect(Clone)] pub enum RayCastVisibility { /// Completely ignore visibility checks. Hidden items can still be ray casted against. Any, @@ -89,7 +90,7 @@ impl<'a> Default for MeshRayCastSettings<'a> { /// /// By default, backfaces are culled. #[derive(Copy, Clone, Default, Reflect)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub enum Backfaces { /// Cull backfaces. #[default] @@ -100,14 +101,14 @@ pub enum Backfaces { /// Disables backface culling for [ray casts](MeshRayCast) on this entity. #[derive(Component, Copy, Clone, Default, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct RayCastBackfaces; /// A simplified mesh component that can be used for [ray casting](super::MeshRayCast). /// /// Consider using this component for complex meshes that don't need perfectly accurate ray casting. #[derive(Component, Clone, Debug, Deref, DerefMut, Reflect)] -#[reflect(Component, Debug)] +#[reflect(Component, Debug, Clone)] pub struct SimplifiedMesh(pub Handle); type MeshFilter = Or<(With, With, With)>; diff --git a/crates/bevy_picking/src/pointer.rs b/crates/bevy_picking/src/pointer.rs index ae5cf9133b..e180a9c1be 100644 --- a/crates/bevy_picking/src/pointer.rs +++ b/crates/bevy_picking/src/pointer.rs @@ -9,8 +9,9 @@ //! driven by lower-level input devices and consumed by higher-level interaction systems. use bevy_ecs::prelude::*; +use bevy_input::mouse::MouseScrollUnit; use bevy_math::Vec2; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::prelude::*; use bevy_render::camera::{Camera, NormalizedRenderTarget}; use bevy_window::PrimaryWindow; @@ -27,7 +28,7 @@ use crate::backend::HitData; /// stable ID that persists regardless of the Entity they are associated with. 
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash, Component, Reflect)] #[require(PointerLocation, PointerPress, PointerInteraction)] -#[reflect(Component, Default, Debug, Hash, PartialEq)] +#[reflect(Component, Default, Debug, Hash, PartialEq, Clone)] pub enum PointerId { /// The mouse pointer. #[default] @@ -36,7 +37,7 @@ pub enum PointerId { Touch(u64), /// A custom, uniquely identified pointer. Useful for mocking inputs or implementing a software /// controlled cursor. - #[reflect(ignore)] + #[reflect(ignore, clone)] Custom(Uuid), } @@ -66,7 +67,7 @@ impl PointerId { /// Holds a list of entities this pointer is currently interacting with, sorted from nearest to /// farthest. #[derive(Debug, Default, Clone, Component, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct PointerInteraction { pub(crate) sorted_entities: Vec<(Entity, HitData)>, } @@ -109,7 +110,7 @@ pub fn update_pointer_map(pointers: Query<(Entity, &PointerId)>, mut map: ResMut /// Tracks the state of the pointer's buttons in response to [`PointerInput`] events. #[derive(Debug, Default, Clone, Component, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct PointerPress { primary: bool, secondary: bool, @@ -144,6 +145,7 @@ impl PointerPress { /// The stage of the pointer button press event #[derive(Debug, Clone, Copy, PartialEq, Eq, Reflect)] +#[reflect(Clone, PartialEq)] pub enum PressDirection { /// The pointer button was just pressed Pressed, @@ -153,6 +155,7 @@ pub enum PressDirection { /// The button that was just pressed or released #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Reflect)] +#[reflect(Clone, PartialEq)] pub enum PointerButton { /// The primary pointer button Primary, @@ -171,11 +174,11 @@ impl PointerButton { /// Component that tracks a pointer's current [`Location`]. #[derive(Debug, Default, Clone, Component, Reflect, PartialEq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct PointerLocation { /// The [`Location`] of the pointer. Note that a location is both the target, and the position /// on the target. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub location: Option, } @@ -203,7 +206,7 @@ impl PointerLocation { /// render target. It is up to picking backends to associate a Pointer's `Location` with a /// specific `Camera`, if any. #[derive(Debug, Clone, Component, Reflect, PartialEq)] -#[reflect(Component, Debug, PartialEq)] +#[reflect(Component, Debug, PartialEq, Clone)] pub struct Location { /// The [`NormalizedRenderTarget`] associated with the pointer, usually a window. pub target: NormalizedRenderTarget, @@ -223,7 +226,7 @@ impl Location { ) -> bool { if camera .target - .normalize(Some(match primary_window.get_single() { + .normalize(Some(match primary_window.single() { Ok(w) => w, Err(_) => return false, })) @@ -241,6 +244,7 @@ impl Location { /// Event sent to drive a pointer. #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub enum PointerAction { /// Causes the pointer to press a button. Press(PointerButton), @@ -251,12 +255,22 @@ pub enum PointerAction { /// How much the pointer moved from the previous position. delta: Vec2, }, + /// Scroll the pointer + Scroll { + /// The mouse scroll unit. + unit: MouseScrollUnit, + /// The horizontal scroll value. + x: f32, + /// The vertical scroll value. + y: f32, + }, /// Cancel the pointer. 
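// Illustrative sketch (editor's addition, not part of the diff): because `PointerId` now
// `#[require]`s its location, press, and interaction components, a software-controlled
// pointer can be spawned from the id alone and the rest of its state is inserted
// automatically.
use bevy_ecs::prelude::*;
use bevy_picking::pointer::PointerId;
use uuid::Uuid;

fn spawn_virtual_pointer(mut commands: Commands) {
    // PointerLocation, PointerPress, and PointerInteraction are added via the
    // required-components mechanism.
    commands.spawn(PointerId::Custom(Uuid::new_v4()));
}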
Often used for touch events. Cancel, } /// An input event effecting a pointer. #[derive(Event, Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct PointerInput { /// The id of the pointer. pub pointer_id: PointerId, diff --git a/crates/bevy_picking/src/window.rs b/crates/bevy_picking/src/window.rs index f55edca2dd..30093da797 100644 --- a/crates/bevy_picking/src/window.rs +++ b/crates/bevy_picking/src/window.rs @@ -6,6 +6,10 @@ //! window will be inserted as a pointer hit, listed behind all other pointer //! hits. This means that when the pointer isn't hovering any other entities, //! the picking events will be routed to the window. +//! +//! ## Implementation Notes +//! +//! - This backend does not provide `position` or `normal` in `HitData`. use core::f32; @@ -35,7 +39,7 @@ pub fn update_window_hits( { let entity = window_ref.entity(); let hit_data = HitData::new(entity, 0.0, None, None); - output_events.send(PointerHits::new( + output_events.write(PointerHits::new( *pointer_id, vec![(entity, hit_data)], f32::NEG_INFINITY, diff --git a/crates/bevy_platform_support/Cargo.toml b/crates/bevy_platform/Cargo.toml similarity index 52% rename from crates/bevy_platform_support/Cargo.toml rename to crates/bevy_platform/Cargo.toml index 630b928f2d..bd6402b36a 100644 --- a/crates/bevy_platform_support/Cargo.toml +++ b/crates/bevy_platform/Cargo.toml @@ -1,8 +1,8 @@ [package] -name = "bevy_platform_support" +name = "bevy_platform" version = "0.16.0-dev" -edition = "2021" -description = "Platform compatibility support for Bevy Engine" +edition = "2024" +description = "Provides common platform agnostic APIs, as well as platform-specific features for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" @@ -14,7 +14,10 @@ default = ["std"] # Functionality ## Adds serialization support through `serde`. -serialize = ["hashbrown/serde"] +serialize = ["dep:serde", "hashbrown/serde"] + +## Adds integration with Rayon. +rayon = ["dep:rayon", "hashbrown/rayon"] # Platform Compatibility @@ -24,32 +27,27 @@ serialize = ["hashbrown/serde"] std = [ "alloc", "critical-section?/std", - "portable-atomic?/std", - "portable-atomic-util?/std", + "portable-atomic/std", + "portable-atomic-util/std", "spin/std", "foldhash/std", + "serde?/std", ] -alloc = ["portable-atomic-util?/alloc", "dep:hashbrown"] +## Allows access to the `alloc` crate. +alloc = ["portable-atomic-util/alloc", "dep:hashbrown", "serde?/alloc"] ## `critical-section` provides the building blocks for synchronization primitives ## on all platforms, including `no_std`. -critical-section = ["dep:critical-section", "portable-atomic?/critical-section"] +critical-section = ["dep:critical-section", "portable-atomic/critical-section"] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "dep:portable-atomic", - "dep:portable-atomic-util", - "spin/portable_atomic", -] +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. 
+web = ["dep:web-time", "dep:getrandom"] [dependencies] +cfg-if = "1.0.0" critical-section = { version = "1.2.0", default-features = false, optional = true } -portable-atomic = { version = "1", default-features = false, features = [ - "fallback", -], optional = true } -portable-atomic-util = { version = "0.2.4", default-features = false, optional = true } spin = { version = "0.9.8", default-features = false, features = [ "mutex", "spin_mutex", @@ -63,10 +61,25 @@ hashbrown = { version = "0.15.1", features = [ "equivalent", "raw-entry", ], optional = true, default-features = false } +serde = { version = "1", default-features = false, optional = true } +rayon = { version = "1", default-features = false, optional = true } [target.'cfg(target_arch = "wasm32")'.dependencies] -web-time = { version = "1.1", default-features = false } -getrandom = { version = "0.2.0", default-features = false, features = ["js"] } +web-time = { version = "1.1", default-features = false, optional = true } +getrandom = { version = "0.2.0", default-features = false, optional = true, features = [ + "js", +] } + +[target.'cfg(not(all(target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", target_has_atomic = "ptr")))'.dependencies] +portable-atomic = { version = "1", default-features = false, features = [ + "fallback", +] } +spin = { version = "0.9.8", default-features = false, features = [ + "portable_atomic", +] } + +[target.'cfg(not(target_has_atomic = "ptr"))'.dependencies] +portable-atomic-util = { version = "0.2.4", default-features = false } [lints] workspace = true diff --git a/crates/bevy_platform/LICENSE-APACHE b/crates/bevy_platform/LICENSE-APACHE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ b/crates/bevy_platform/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_platform/LICENSE-MIT b/crates/bevy_platform/LICENSE-MIT new file mode 100644 index 0000000000..9cf106272a --- /dev/null +++ b/crates/bevy_platform/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_platform_support/README.md b/crates/bevy_platform/README.md similarity index 62% rename from crates/bevy_platform_support/README.md rename to crates/bevy_platform/README.md index 7daf5afa50..4d853751aa 100644 --- a/crates/bevy_platform_support/README.md +++ b/crates/bevy_platform/README.md @@ -1,16 +1,16 @@ # Bevy Platform Support [![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) -[![Crates.io](https://img.shields.io/crates/v/bevy_platform_support.svg)](https://crates.io/crates/bevy_platform_support) -[![Downloads](https://img.shields.io/crates/d/bevy_platform_support.svg)](https://crates.io/crates/bevy_platform_support) -[![Docs](https://docs.rs/bevy_platform_support/badge.svg)](https://docs.rs/bevy_platform_support/latest/bevy_platform_support/) +[![Crates.io](https://img.shields.io/crates/v/bevy_platform.svg)](https://crates.io/crates/bevy_platform) +[![Downloads](https://img.shields.io/crates/d/bevy_platform.svg)](https://crates.io/crates/bevy_platform) +[![Docs](https://docs.rs/bevy_platform/badge.svg)](https://docs.rs/bevy_platform/latest/bevy_platform/) [![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) Rust is a fantastic multi-platform language with extensive support for modern targets through its [standard library](https://doc.rust-lang.org/stable/std/). However, some items within the standard library have alternatives that are better suited for [Bevy](https://crates.io/crates/bevy) and game engines in general. Additionally, to support embedded and other esoteric platforms, it's often necessary to shed reliance on `std`, making your crate [`no_std`](https://docs.rust-embedded.org/book/intro/no-std.html). -These needs are handled by this crate, `bevy_platform_support`. +These needs are handled by this crate, `bevy_platform`. The goal of this crate is to provide alternatives and extensions to the Rust standard library which minimize friction when developing with and for Bevy across multiple platforms. ## Getting Started @@ -18,19 +18,19 @@ The goal of this crate is to provide alternatives and extensions to the Rust sta Like any dependency from [crates.io](https://crates.io/), use `cargo` to add it to your `Cargo.toml` file: ```sh -cargo add bevy_platform_support +cargo add bevy_platform ``` -Now, instead of importing from `std` you can use `bevy_platform_support` for items it has alternative for. +Now, instead of importing from `std` you can use `bevy_platform` for items it has alternative for. See the documentation for what items are available, and explanations for _why_ you may want to use them. ## `no_std` Support -By default, `bevy_platform_support` will activate the `std` feature, requiring access to the `std` crate for whichever platforms you're targeting. +By default, `bevy_platform` will activate the `std` feature, requiring access to the `std` crate for whichever platforms you're targeting. To use this crate on `no_std` platforms, disable default features: ```toml -bevy_platform_support = { version = "x.y.z", default-features = false } +bevy_platform = { version = "x.y.z", default-features = false } ``` ## Features @@ -45,12 +45,7 @@ This is explicitly incompatible with `no_std` targets. Enables usage of the [`alloc`](https://doc.rust-lang.org/stable/alloc/) crate. Note that this feature is automatically enabled when enabling `std`. 
This is compatible with most `no_std` targets, but not all. -### `portable-atomic` - -Switches to using [`portable-atomic`](https://docs.rs/portable-atomic/latest/portable_atomic/) as a backend for atomic types, such as `Arc`, `AtomicU8`, etc. -You may need to enable this feature on platforms without full support for atomic types or certain operations, such as [atomic CAS](https://en.wikipedia.org/wiki/Compare-and-swap). - ### `critical-section` Switches to using [`critical-section`](https://docs.rs/critical-section/latest/critical_section/) as a backend for synchronization. -You may need to enable this feature on platforms with little to no support for atomic operations, and is often paired with the `portable-atomic` feature. +You may need to enable this feature on platforms with little to no support for atomic operations. diff --git a/crates/bevy_platform/src/collections/hash_map.rs b/crates/bevy_platform/src/collections/hash_map.rs new file mode 100644 index 0000000000..ae978a7fce --- /dev/null +++ b/crates/bevy_platform/src/collections/hash_map.rs @@ -0,0 +1,1287 @@ +//! Provides [`HashMap`] based on [hashbrown]'s implementation. +//! Unlike [`hashbrown::HashMap`], [`HashMap`] defaults to [`FixedHasher`] +//! instead of [`RandomState`]. +//! This provides determinism by default with an acceptable compromise to denial +//! of service resistance in the context of a game engine. + +use core::{ + fmt::Debug, + hash::{BuildHasher, Hash}, + ops::{Deref, DerefMut, Index}, +}; + +use hashbrown::{hash_map as hb, Equivalent}; + +use crate::hash::FixedHasher; + +#[cfg(feature = "rayon")] +use rayon::prelude::{FromParallelIterator, IntoParallelIterator, ParallelExtend}; + +// Re-exports to match `std::collections::hash_map` +pub use { + crate::hash::{DefaultHasher, RandomState}, + hb::{ + Drain, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, OccupiedEntry, VacantEntry, + Values, ValuesMut, + }, +}; + +// Additional items from `hashbrown` +pub use hb::{ + EntryRef, ExtractIf, OccupiedError, RawEntryBuilder, RawEntryBuilderMut, RawEntryMut, + RawOccupiedEntryMut, +}; + +/// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. +pub type Entry<'a, K, V, S = FixedHasher> = hb::Entry<'a, K, V, S>; + +/// New-type for [`HashMap`](hb::HashMap) with [`FixedHasher`] as the default hashing provider. +/// Can be trivially converted to and from a [hashbrown] [`HashMap`](hb::HashMap) using [`From`]. +/// +/// A new-type is used instead of a type alias due to critical methods like [`new`](hb::HashMap::new) +/// being incompatible with Bevy's choice of default hasher. 
+#[repr(transparent)] +pub struct HashMap(hb::HashMap); + +impl Clone for HashMap +where + hb::HashMap: Clone, +{ + #[inline] + fn clone(&self) -> Self { + Self(self.0.clone()) + } + + #[inline] + fn clone_from(&mut self, source: &Self) { + self.0.clone_from(&source.0); + } +} + +impl Debug for HashMap +where + hb::HashMap: Debug, +{ + #[inline] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + as Debug>::fmt(&self.0, f) + } +} + +impl Default for HashMap +where + hb::HashMap: Default, +{ + #[inline] + fn default() -> Self { + Self(Default::default()) + } +} + +impl PartialEq for HashMap +where + hb::HashMap: PartialEq, +{ + #[inline] + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for HashMap where hb::HashMap: Eq {} + +impl FromIterator for HashMap +where + hb::HashMap: FromIterator, +{ + #[inline] + fn from_iter>(iter: U) -> Self { + Self(FromIterator::from_iter(iter)) + } +} + +impl Index for HashMap +where + hb::HashMap: Index, +{ + type Output = as Index>::Output; + + #[inline] + fn index(&self, index: T) -> &Self::Output { + self.0.index(index) + } +} + +impl IntoIterator for HashMap +where + hb::HashMap: IntoIterator, +{ + type Item = as IntoIterator>::Item; + + type IntoIter = as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a HashMap +where + &'a hb::HashMap: IntoIterator, +{ + type Item = <&'a hb::HashMap as IntoIterator>::Item; + + type IntoIter = <&'a hb::HashMap as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&self.0).into_iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut HashMap +where + &'a mut hb::HashMap: IntoIterator, +{ + type Item = <&'a mut hb::HashMap as IntoIterator>::Item; + + type IntoIter = <&'a mut hb::HashMap as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&mut self.0).into_iter() + } +} + +impl Extend for HashMap +where + hb::HashMap: Extend, +{ + #[inline] + fn extend>(&mut self, iter: U) { + self.0.extend(iter); + } +} + +impl From<[(K, V); N]> for HashMap +where + K: Eq + Hash, +{ + fn from(arr: [(K, V); N]) -> Self { + arr.into_iter().collect() + } +} + +impl From> for HashMap { + #[inline] + fn from(value: hb::HashMap) -> Self { + Self(value) + } +} + +impl From> for hb::HashMap { + #[inline] + fn from(value: HashMap) -> Self { + value.0 + } +} + +impl Deref for HashMap { + type Target = hb::HashMap; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for HashMap { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "serialize")] +impl serde::Serialize for HashMap +where + hb::HashMap: serde::Serialize, +{ + #[inline] + fn serialize(&self, serializer: T) -> Result + where + T: serde::Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(feature = "serialize")] +impl<'de, K, V, S> serde::Deserialize<'de> for HashMap +where + hb::HashMap: serde::Deserialize<'de>, +{ + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + Ok(Self(serde::Deserialize::deserialize(deserializer)?)) + } +} + +#[cfg(feature = "rayon")] +impl FromParallelIterator for HashMap +where + hb::HashMap: FromParallelIterator, + T: Send, +{ + fn from_par_iter
(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + Self( as FromParallelIterator>::from_par_iter(par_iter)) + } +} + +#[cfg(feature = "rayon")] +impl IntoParallelIterator for HashMap +where + hb::HashMap: IntoParallelIterator, +{ + type Item = as IntoParallelIterator>::Item; + type Iter = as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + self.0.into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl<'a, K: Sync, V: Sync, S> IntoParallelIterator for &'a HashMap +where + &'a hb::HashMap: IntoParallelIterator, +{ + type Item = <&'a hb::HashMap as IntoParallelIterator>::Item; + type Iter = <&'a hb::HashMap as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + (&self.0).into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl<'a, K: Sync, V: Sync, S> IntoParallelIterator for &'a mut HashMap +where + &'a mut hb::HashMap: IntoParallelIterator, +{ + type Item = <&'a mut hb::HashMap as IntoParallelIterator>::Item; + type Iter = <&'a mut hb::HashMap as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + (&mut self.0).into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl ParallelExtend for HashMap +where + hb::HashMap: ParallelExtend, + T: Send, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + as ParallelExtend>::par_extend(&mut self.0, par_iter); + } +} + +impl HashMap { + /// Creates an empty [`HashMap`]. + /// + /// Refer to [`new`](hb::HashMap::new) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// // Creates a HashMap with zero capacity. + /// let map = HashMap::new(); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn new() -> Self { + Self::with_hasher(FixedHasher) + } + + /// Creates an empty [`HashMap`] with the specified capacity. + /// + /// Refer to [`with_capacity`](hb::HashMap::with_capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// // Creates a HashMap with capacity for at least 5 entries. + /// let map = HashMap::with_capacity(5); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, FixedHasher) + } +} + +impl HashMap { + /// Creates an empty [`HashMap`] which will use the given hash builder to hash + /// keys. + /// + /// Refer to [`with_hasher`](hb::HashMap::with_hasher) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashMap with the provided hasher. + /// let map = HashMap::with_hasher(SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn with_hasher(hash_builder: S) -> Self { + Self(hb::HashMap::with_hasher(hash_builder)) + } + + /// Creates an empty [`HashMap`] with the specified capacity, using `hash_builder` + /// to hash the keys. + /// + /// Refer to [`with_capacity_and_hasher`](hb::HashMap::with_capacity_and_hasher) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashMap with capacity for 5 entries and the provided hasher. + /// let map = HashMap::with_capacity_and_hasher(5, SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { + Self(hb::HashMap::with_capacity_and_hasher( + capacity, + hash_builder, + )) + } + + /// Returns a reference to the map's [`BuildHasher`], or `S` parameter. + /// + /// Refer to [`hasher`](hb::HashMap::hasher) for further details. + #[inline] + pub fn hasher(&self) -> &S { + self.0.hasher() + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// Refer to [`capacity`](hb::HashMap::capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let map = HashMap::with_capacity(5); + /// + /// # let map: HashMap<(), ()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// ``` + #[inline] + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// An iterator visiting all keys in arbitrary order. + /// The iterator element type is `&'a K`. + /// + /// Refer to [`keys`](hb::HashMap::keys) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.keys() { + /// // foo, bar, baz + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.keys().count(), 3); + /// ``` + #[inline] + pub fn keys(&self) -> Keys<'_, K, V> { + self.0.keys() + } + + /// An iterator visiting all values in arbitrary order. + /// The iterator element type is `&'a V`. + /// + /// Refer to [`values`](hb::HashMap::values) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.values() { + /// // 0, 1, 2 + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.values().count(), 3); + /// ``` + #[inline] + pub fn values(&self) -> Values<'_, K, V> { + self.0.values() + } + + /// An iterator visiting all values mutably in arbitrary order. + /// The iterator element type is `&'a mut V`. + /// + /// Refer to [`values`](hb::HashMap::values) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.values_mut() { + /// // 0, 1, 2 + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.values_mut().count(), 3); + /// ``` + #[inline] + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + self.0.values_mut() + } + + /// An iterator visiting all key-value pairs in arbitrary order. + /// The iterator element type is `(&'a K, &'a V)`. + /// + /// Refer to [`iter`](hb::HashMap::iter) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for (key, value) in map.iter() { + /// // ("foo", 0), ("bar", 1), ("baz", 2) + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.iter().count(), 3); + /// ``` + #[inline] + pub fn iter(&self) -> Iter<'_, K, V> { + self.0.iter() + } + + /// An iterator visiting all key-value pairs in arbitrary order, + /// with mutable references to the values. + /// The iterator element type is `(&'a K, &'a mut V)`. + /// + /// Refer to [`iter_mut`](hb::HashMap::iter_mut) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for (key, value) in map.iter_mut() { + /// // ("foo", 0), ("bar", 1), ("baz", 2) + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.iter_mut().count(), 3); + /// ``` + #[inline] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + self.0.iter_mut() + } + + /// Returns the number of elements in the map. + /// + /// Refer to [`len`](hb::HashMap::len) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// assert_eq!(map.len(), 0); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the map contains no elements. + /// + /// Refer to [`is_empty`](hb::HashMap::is_empty) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// assert!(map.is_empty()); + /// + /// map.insert("foo", 0); + /// + /// assert!(!map.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Clears the map, returning all key-value pairs as an iterator. Keeps the + /// allocated memory for reuse. + /// + /// Refer to [`drain`](hb::HashMap::drain) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for (key, value) in map.drain() { + /// // ("foo", 0), ("bar", 1), ("baz", 2) + /// // Note that the above order is not guaranteed + /// } + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn drain(&mut self) -> Drain<'_, K, V> { + self.0.drain() + } + + /// Retains only the elements specified by the predicate. Keeps the + /// allocated memory for reuse. + /// + /// Refer to [`retain`](hb::HashMap::retain) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// map.retain(|key, value| *value == 2); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn retain(&mut self, f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + self.0.retain(f); + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// Refer to [`extract_if`](hb::HashMap::extract_if) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// let extracted = map + /// .extract_if(|key, value| *value == 2) + /// .collect::>(); + /// + /// assert_eq!(map.len(), 2); + /// assert_eq!(extracted.len(), 1); + /// ``` + #[inline] + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, K, V, F> + where + F: FnMut(&K, &mut V) -> bool, + { + self.0.extract_if(f) + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory + /// for reuse. + /// + /// Refer to [`clear`](hb::HashMap::clear) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// map.clear(); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn clear(&mut self) { + self.0.clear(); + } + + /// Creates a consuming iterator visiting all the keys in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `K`. + /// + /// Refer to [`into_keys`](hb::HashMap::into_keys) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.into_keys() { + /// // "foo", "bar", "baz" + /// // Note that the above order is not guaranteed + /// } + /// ``` + #[inline] + pub fn into_keys(self) -> IntoKeys { + self.0.into_keys() + } + + /// Creates a consuming iterator visiting all the values in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `V`. + /// + /// Refer to [`into_values`](hb::HashMap::into_values) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.into_values() { + /// // 0, 1, 2 + /// // Note that the above order is not guaranteed + /// } + /// ``` + #[inline] + pub fn into_values(self) -> IntoValues { + self.0.into_values() + } + + /// Takes the inner [`HashMap`](hb::HashMap) out of this wrapper. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let map: HashMap<&'static str, usize> = HashMap::new(); + /// let map: hashbrown::HashMap<&'static str, usize, _> = map.into_inner(); + /// ``` + #[inline] + pub fn into_inner(self) -> hb::HashMap { + self.0 + } +} + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the [`HashMap`]. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`reserve`](hb::HashMap::reserve) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::with_capacity(5); + /// + /// # let mut map: HashMap<(), ()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.reserve(10); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn reserve(&mut self, additional: usize) { + self.0.reserve(additional); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashMap`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`try_reserve`](hb::HashMap::try_reserve) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::with_capacity(5); + /// + /// # let mut map: HashMap<(), ()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.try_reserve(10).expect("Out of Memory!"); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), hashbrown::TryReserveError> { + self.0.try_reserve(additional) + } + + /// Shrinks the capacity of the map as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to_fit`](hb::HashMap::shrink_to_fit) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::with_capacity(5); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// assert!(map.capacity() >= 5); + /// + /// map.shrink_to_fit(); + /// + /// assert_eq!(map.capacity(), 3); + /// ``` + #[inline] + pub fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit(); + } + + /// Shrinks the capacity of the map with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to`](hb::HashMap::shrink_to) for further details. + #[inline] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.0.shrink_to(min_capacity); + } + + /// Gets the given key's corresponding entry in the map for in-place manipulation. + /// + /// Refer to [`entry`](hb::HashMap::entry) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// let value = map.entry("foo").or_insert(0); + /// # + /// # assert_eq!(*value, 0); + /// ``` + #[inline] + pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S> { + self.0.entry(key) + } + + /// Gets the given key's corresponding entry by reference in the map for in-place manipulation. + /// + /// Refer to [`entry_ref`](hb::HashMap::entry_ref) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// # let mut map: HashMap<&'static str, usize> = map; + /// + /// let value = map.entry_ref("foo").or_insert(0); + /// # + /// # assert_eq!(*value, 0); + /// ``` + #[inline] + pub fn entry_ref<'a, 'b, Q>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.entry_ref(key) + } + + /// Returns a reference to the value corresponding to the key. + /// + /// Refer to [`get`](hb::HashMap::get) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get("foo"), Some(&0)); + /// ``` + #[inline] + pub fn get(&self, k: &Q) -> Option<&V> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get(k) + } + + /// Returns the key-value pair corresponding to the supplied key. + /// + /// Refer to [`get_key_value`](hb::HashMap::get_key_value) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get_key_value("foo"), Some((&"foo", &0))); + /// ``` + #[inline] + pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_key_value(k) + } + + /// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value. + /// + /// Refer to [`get_key_value_mut`](hb::HashMap::get_key_value_mut) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get_key_value_mut("foo"), Some((&"foo", &mut 0))); + /// ``` + #[inline] + pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_key_value_mut(k) + } + + /// Returns `true` if the map contains a value for the specified key. + /// + /// Refer to [`contains_key`](hb::HashMap::contains_key) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert!(map.contains_key("foo")); + /// ``` + #[inline] + pub fn contains_key(&self, k: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.0.contains_key(k) + } + + /// Returns a mutable reference to the value corresponding to the key. + /// + /// Refer to [`get_mut`](hb::HashMap::get_mut) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get_mut("foo"), Some(&mut 0)); + /// ``` + #[inline] + pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_mut(k) + } + + /// Attempts to get mutable references to `N` values in the map at once. + /// + /// Refer to [`get_many_mut`](hb::HashMap::get_many_mut) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// let result = map.get_many_mut(["foo", "bar"]); + /// + /// assert_eq!(result, [Some(&mut 0), Some(&mut 1)]); + /// ``` + #[inline] + pub fn get_many_mut(&mut self, ks: [&Q; N]) -> [Option<&'_ mut V>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_many_mut(ks) + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys. + /// + /// Refer to [`get_many_key_value_mut`](hb::HashMap::get_many_key_value_mut) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// let result = map.get_many_key_value_mut(["foo", "bar"]); + /// + /// assert_eq!(result, [Some((&"foo", &mut 0)), Some((&"bar", &mut 1))]); + /// ``` + #[inline] + pub fn get_many_key_value_mut( + &mut self, + ks: [&Q; N], + ) -> [Option<(&'_ K, &'_ mut V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_many_key_value_mut(ks) + } + + /// Inserts a key-value pair into the map. + /// + /// Refer to [`insert`](hb::HashMap::insert) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get("foo"), Some(&0)); + /// ``` + #[inline] + pub fn insert(&mut self, k: K, v: V) -> Option { + self.0.insert(k, v) + } + + /// Tries to insert a key-value pair into the map, and returns + /// a mutable reference to the value in the entry. + /// + /// Refer to [`try_insert`](hb::HashMap::try_insert) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.try_insert("foo", 0).unwrap(); + /// + /// assert!(map.try_insert("foo", 1).is_err()); + /// ``` + #[inline] + pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, OccupiedError<'_, K, V, S>> { + self.0.try_insert(key, value) + } + + /// Removes a key from the map, returning the value at the key if the key + /// was previously in the map. Keeps the allocated memory for reuse. + /// + /// Refer to [`remove`](hb::HashMap::remove) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.remove("foo"), Some(0)); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn remove(&mut self, k: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + { + self.0.remove(k) + } + + /// Removes a key from the map, returning the stored key and value if the + /// key was previously in the map. Keeps the allocated memory for reuse. + /// + /// Refer to [`remove_entry`](hb::HashMap::remove_entry) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.remove_entry("foo"), Some(("foo", 0))); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.remove_entry(k) + } + + /// Returns the total amount of memory allocated internally by the hash + /// set, in bytes. + /// + /// Refer to [`allocation_size`](hb::HashMap::allocation_size) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// assert_eq!(map.allocation_size(), 0); + /// + /// map.insert("foo", 0u32); + /// + /// assert!(map.allocation_size() >= size_of::<&'static str>() + size_of::()); + /// ``` + #[inline] + pub fn allocation_size(&self) -> usize { + self.0.allocation_size() + } + + /// Insert a key-value pair into the map without checking + /// if the key already exists in the map. + /// + /// Refer to [`insert_unique_unchecked`](hb::HashMap::insert_unique_unchecked) for further details. + /// + /// # Safety + /// + /// This operation is safe if a key does not exist in the map. + /// + /// However, if a key exists in the map already, the behavior is unspecified: + /// this operation may panic, loop forever, or any following operation with the map + /// may panic, loop forever or return arbitrary result. + /// + /// That said, this operation (and following operations) are guaranteed to + /// not violate memory safety. + /// + /// However this operation is still unsafe because the resulting `HashMap` + /// may be passed to unsafe code which does expect the map to behave + /// correctly, and would cause unsoundness as a result. + #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn insert_unique_unchecked(&mut self, key: K, value: V) -> (&K, &mut V) { + // SAFETY: safety contract is ensured by the caller. + unsafe { self.0.insert_unique_unchecked(key, value) } + } + + /// Attempts to get mutable references to `N` values in the map at once, without validating that + /// the values are unique. + /// + /// Refer to [`get_many_unchecked_mut`](hb::HashMap::get_many_unchecked_mut) for further details. + /// + /// Returns an array of length `N` with the results of each query. `None` will be used if + /// the key is missing. + /// + /// For a safe alternative see [`get_many_mut`](`HashMap::get_many_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. 
+ /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn get_many_unchecked_mut( + &mut self, + keys: [&Q; N], + ) -> [Option<&'_ mut V>; N] + where + Q: Hash + Equivalent + ?Sized, + { + // SAFETY: safety contract is ensured by the caller. + unsafe { self.0.get_many_unchecked_mut(keys) } + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys, without validating that the values are unique. + /// + /// Refer to [`get_many_key_value_unchecked_mut`](hb::HashMap::get_many_key_value_unchecked_mut) for further details. + /// + /// Returns an array of length `N` with the results of each query. `None` will be returned if + /// any of the keys are missing. + /// + /// For a safe alternative see [`get_many_key_value_mut`](`HashMap::get_many_key_value_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. + /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn get_many_key_value_unchecked_mut( + &mut self, + keys: [&Q; N], + ) -> [Option<(&'_ K, &'_ mut V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + // SAFETY: safety contract is ensured by the caller. + unsafe { self.0.get_many_key_value_unchecked_mut(keys) } + } +} diff --git a/crates/bevy_platform/src/collections/hash_set.rs b/crates/bevy_platform/src/collections/hash_set.rs new file mode 100644 index 0000000000..7950e946db --- /dev/null +++ b/crates/bevy_platform/src/collections/hash_set.rs @@ -0,0 +1,1078 @@ +//! Provides [`HashSet`] based on [hashbrown]'s implementation. +//! Unlike [`hashbrown::HashSet`], [`HashSet`] defaults to [`FixedHasher`] +//! instead of [`RandomState`](crate::hash::RandomState). +//! This provides determinism by default with an acceptable compromise to denial +//! of service resistance in the context of a game engine. + +use core::{ + fmt::Debug, + hash::{BuildHasher, Hash}, + ops::{ + BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Deref, DerefMut, Sub, + SubAssign, + }, +}; + +use hashbrown::{hash_set as hb, Equivalent}; + +use crate::hash::FixedHasher; + +#[cfg(feature = "rayon")] +use rayon::prelude::{FromParallelIterator, IntoParallelIterator, ParallelExtend}; + +// Re-exports to match `std::collections::hash_set` +pub use hb::{Difference, Drain, Intersection, IntoIter, Iter, SymmetricDifference, Union}; + +// Additional items from `hashbrown` +pub use hb::{ExtractIf, OccupiedEntry, VacantEntry}; + +/// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. +pub type Entry<'a, T, S = FixedHasher> = hb::Entry<'a, T, S>; + +/// New-type for [`HashSet`](hb::HashSet) with [`FixedHasher`] as the default hashing provider. +/// Can be trivially converted to and from a [hashbrown] [`HashSet`](hb::HashSet) using [`From`]. +/// +/// A new-type is used instead of a type alias due to critical methods like [`new`](hb::HashSet::new) +/// being incompatible with Bevy's choice of default hasher. 
+#[repr(transparent)] +pub struct HashSet(hb::HashSet); + +impl Clone for HashSet +where + hb::HashSet: Clone, +{ + #[inline] + fn clone(&self) -> Self { + Self(self.0.clone()) + } + + #[inline] + fn clone_from(&mut self, source: &Self) { + self.0.clone_from(&source.0); + } +} + +impl Debug for HashSet +where + hb::HashSet: Debug, +{ + #[inline] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + as Debug>::fmt(&self.0, f) + } +} + +impl Default for HashSet +where + hb::HashSet: Default, +{ + #[inline] + fn default() -> Self { + Self(Default::default()) + } +} + +impl PartialEq for HashSet +where + hb::HashSet: PartialEq, +{ + #[inline] + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for HashSet where hb::HashSet: Eq {} + +impl FromIterator for HashSet +where + hb::HashSet: FromIterator, +{ + #[inline] + fn from_iter>(iter: U) -> Self { + Self(FromIterator::from_iter(iter)) + } +} + +impl IntoIterator for HashSet +where + hb::HashSet: IntoIterator, +{ + type Item = as IntoIterator>::Item; + + type IntoIter = as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> IntoIterator for &'a HashSet +where + &'a hb::HashSet: IntoIterator, +{ + type Item = <&'a hb::HashSet as IntoIterator>::Item; + + type IntoIter = <&'a hb::HashSet as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&self.0).into_iter() + } +} + +impl<'a, T, S> IntoIterator for &'a mut HashSet +where + &'a mut hb::HashSet: IntoIterator, +{ + type Item = <&'a mut hb::HashSet as IntoIterator>::Item; + + type IntoIter = <&'a mut hb::HashSet as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&mut self.0).into_iter() + } +} + +impl Extend for HashSet +where + hb::HashSet: Extend, +{ + #[inline] + fn extend>(&mut self, iter: U) { + self.0.extend(iter); + } +} + +impl From<[T; N]> for HashSet +where + T: Eq + Hash, +{ + fn from(value: [T; N]) -> Self { + value.into_iter().collect() + } +} + +impl From> for HashSet { + #[inline] + fn from(value: crate::collections::HashMap) -> Self { + Self(hb::HashSet::from(hashbrown::HashMap::from(value))) + } +} + +impl From> for HashSet { + #[inline] + fn from(value: hb::HashSet) -> Self { + Self(value) + } +} + +impl From> for hb::HashSet { + #[inline] + fn from(value: HashSet) -> Self { + value.0 + } +} + +impl Deref for HashSet { + type Target = hb::HashSet; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for HashSet { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "serialize")] +impl serde::Serialize for HashSet +where + hb::HashSet: serde::Serialize, +{ + #[inline] + fn serialize(&self, serializer: U) -> Result + where + U: serde::Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(feature = "serialize")] +impl<'de, T, S> serde::Deserialize<'de> for HashSet +where + hb::HashSet: serde::Deserialize<'de>, +{ + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + Ok(Self(serde::Deserialize::deserialize(deserializer)?)) + } +} + +#[cfg(feature = "rayon")] +impl FromParallelIterator for HashSet +where + hb::HashSet: FromParallelIterator, + U: Send, +{ + fn from_par_iter
(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + Self( as FromParallelIterator>::from_par_iter(par_iter)) + } +} + +#[cfg(feature = "rayon")] +impl IntoParallelIterator for HashSet +where + hb::HashSet: IntoParallelIterator, +{ + type Item = as IntoParallelIterator>::Item; + type Iter = as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + self.0.into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl<'a, T: Sync, S> IntoParallelIterator for &'a HashSet +where + &'a hb::HashSet: IntoParallelIterator, +{ + type Item = <&'a hb::HashSet as IntoParallelIterator>::Item; + type Iter = <&'a hb::HashSet as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + (&self.0).into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl ParallelExtend for HashSet +where + hb::HashSet: ParallelExtend, + U: Send, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + as ParallelExtend>::par_extend(&mut self.0, par_iter); + } +} + +impl HashSet { + /// Creates an empty [`HashSet`]. + /// + /// Refer to [`new`](hb::HashSet::new) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// // Creates a HashSet with zero capacity. + /// let map = HashSet::new(); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn new() -> Self { + Self::with_hasher(FixedHasher) + } + + /// Creates an empty [`HashSet`] with the specified capacity. + /// + /// Refer to [`with_capacity`](hb::HashSet::with_capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// // Creates a HashSet with capacity for at least 5 entries. + /// let map = HashSet::with_capacity(5); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, FixedHasher) + } +} + +impl HashSet { + /// Returns the number of elements the set can hold without reallocating. + /// + /// Refer to [`capacity`](hb::HashSet::capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let map = HashSet::with_capacity(5); + /// + /// # let map: HashSet<()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// ``` + #[inline] + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// An iterator visiting all elements in arbitrary order. + /// The iterator element type is `&'a T`. + /// + /// Refer to [`iter`](hb::HashSet::iter) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// for value in map.iter() { + /// // "foo", "bar", "baz" + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.iter().count(), 3); + /// ``` + #[inline] + pub fn iter(&self) -> Iter<'_, T> { + self.0.iter() + } + + /// Returns the number of elements in the set. + /// + /// Refer to [`len`](hb::HashSet::len) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.len(), 0); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the set contains no elements. + /// + /// Refer to [`is_empty`](hb::HashSet::is_empty) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert!(map.is_empty()); + /// + /// map.insert("foo"); + /// + /// assert!(!map.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Clears the set, returning all elements in an iterator. + /// + /// Refer to [`drain`](hb::HashSet::drain) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// for value in map.drain() { + /// // "foo", "bar", "baz" + /// // Note that the above order is not guaranteed + /// } + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn drain(&mut self) -> Drain<'_, T> { + self.0.drain() + } + + /// Retains only the elements specified by the predicate. + /// + /// Refer to [`retain`](hb::HashSet::retain) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// map.retain(|value| *value == "baz"); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn retain(&mut self, f: F) + where + F: FnMut(&T) -> bool, + { + self.0.retain(f); + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// Refer to [`extract_if`](hb::HashSet::extract_if) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// let extracted = map + /// .extract_if(|value| *value == "baz") + /// .collect::>(); + /// + /// assert_eq!(map.len(), 2); + /// assert_eq!(extracted.len(), 1); + /// ``` + #[inline] + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, T, F> + where + F: FnMut(&T) -> bool, + { + self.0.extract_if(f) + } + + /// Clears the set, removing all values. + /// + /// Refer to [`clear`](hb::HashSet::clear) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// map.clear(); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn clear(&mut self) { + self.0.clear(); + } + + /// Creates a new empty hash set which will use the given hasher to hash + /// keys. + /// + /// Refer to [`with_hasher`](hb::HashSet::with_hasher) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashSet with the provided hasher. 
+ /// let map = HashSet::with_hasher(SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn with_hasher(hasher: S) -> Self { + Self(hb::HashSet::with_hasher(hasher)) + } + + /// Creates an empty [`HashSet`] with the specified capacity, using + /// `hasher` to hash the keys. + /// + /// Refer to [`with_capacity_and_hasher`](hb::HashSet::with_capacity_and_hasher) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashSet with capacity for 5 entries and the provided hasher. + /// let map = HashSet::with_capacity_and_hasher(5, SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { + Self(hb::HashSet::with_capacity_and_hasher(capacity, hasher)) + } + + /// Returns a reference to the set's [`BuildHasher`]. + /// + /// Refer to [`hasher`](hb::HashSet::hasher) for further details. + #[inline] + pub fn hasher(&self) -> &S { + self.0.hasher() + } + + /// Takes the inner [`HashSet`](hb::HashSet) out of this wrapper. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let map: HashSet<&'static str> = HashSet::new(); + /// let map: hashbrown::HashSet<&'static str, _> = map.into_inner(); + /// ``` + #[inline] + pub fn into_inner(self) -> hb::HashSet { + self.0 + } +} + +impl HashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the [`HashSet`]. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`reserve`](hb::HashSet::reserve) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::with_capacity(5); + /// + /// # let mut map: HashSet<()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.reserve(10); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn reserve(&mut self, additional: usize) { + self.0.reserve(additional); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashSet`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`try_reserve`](hb::HashSet::try_reserve) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::with_capacity(5); + /// + /// # let mut map: HashSet<()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.try_reserve(10).expect("Out of Memory!"); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), hashbrown::TryReserveError> { + self.0.try_reserve(additional) + } + + /// Shrinks the capacity of the set as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to_fit`](hb::HashSet::shrink_to_fit) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::with_capacity(5); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// assert!(map.capacity() >= 5); + /// + /// map.shrink_to_fit(); + /// + /// assert_eq!(map.capacity(), 3); + /// ``` + #[inline] + pub fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit(); + } + + /// Shrinks the capacity of the set with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to`](hb::HashSet::shrink_to) for further details. + #[inline] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.0.shrink_to(min_capacity); + } + + /// Visits the values representing the difference, + /// i.e., the values that are in `self` but not in `other`. + /// + /// Refer to [`difference`](hb::HashSet::difference) for further details. + #[inline] + pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T, S> { + self.0.difference(other) + } + + /// Visits the values representing the symmetric difference, + /// i.e., the values that are in `self` or in `other` but not in both. + /// + /// Refer to [`symmetric_difference`](hb::HashSet::symmetric_difference) for further details. + #[inline] + pub fn symmetric_difference<'a>(&'a self, other: &'a Self) -> SymmetricDifference<'a, T, S> { + self.0.symmetric_difference(other) + } + + /// Visits the values representing the intersection, + /// i.e., the values that are both in `self` and `other`. + /// + /// Refer to [`intersection`](hb::HashSet::intersection) for further details. + #[inline] + pub fn intersection<'a>(&'a self, other: &'a Self) -> Intersection<'a, T, S> { + self.0.intersection(other) + } + + /// Visits the values representing the union, + /// i.e., all the values in `self` or `other`, without duplicates. + /// + /// Refer to [`union`](hb::HashSet::union) for further details. + #[inline] + pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T, S> { + self.0.union(other) + } + + /// Returns `true` if the set contains a value. + /// + /// Refer to [`contains`](hb::HashSet::contains) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert!(map.contains("foo")); + /// ``` + #[inline] + pub fn contains(&self, value: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.0.contains(value) + } + + /// Returns a reference to the value in the set, if any, that is equal to the given value. + /// + /// Refer to [`get`](hb::HashSet::get) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.get("foo"), Some(&"foo")); + /// ``` + #[inline] + pub fn get(&self, value: &Q) -> Option<&T> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get(value) + } + + /// Inserts the given `value` into the set if it is not present, then + /// returns a reference to the value in the set. + /// + /// Refer to [`get_or_insert`](hb::HashSet::get_or_insert) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.get_or_insert("foo"), &"foo"); + /// ``` + #[inline] + pub fn get_or_insert(&mut self, value: T) -> &T { + self.0.get_or_insert(value) + } + + /// Inserts a value computed from `f` into the set if the given `value` is + /// not present, then returns a reference to the value in the set. + /// + /// Refer to [`get_or_insert_with`](hb::HashSet::get_or_insert_with) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.get_or_insert_with(&"foo", |_| "foo"), &"foo"); + /// ``` + #[inline] + pub fn get_or_insert_with(&mut self, value: &Q, f: F) -> &T + where + Q: Hash + Equivalent + ?Sized, + F: FnOnce(&Q) -> T, + { + self.0.get_or_insert_with(value, f) + } + + /// Gets the given value's corresponding entry in the set for in-place manipulation. + /// + /// Refer to [`entry`](hb::HashSet::entry) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// let value = map.entry("foo").or_insert(); + /// # + /// # assert_eq!(value, ()); + /// ``` + #[inline] + pub fn entry(&mut self, value: T) -> Entry<'_, T, S> { + self.0.entry(value) + } + + /// Returns `true` if `self` has no elements in common with `other`. + /// This is equivalent to checking for an empty intersection. + /// + /// Refer to [`is_disjoint`](hb::HashSet::is_disjoint) for further details. + #[inline] + pub fn is_disjoint(&self, other: &Self) -> bool { + self.0.is_disjoint(other) + } + + /// Returns `true` if the set is a subset of another, + /// i.e., `other` contains at least all the values in `self`. + /// + /// Refer to [`is_subset`](hb::HashSet::is_subset) for further details. + #[inline] + pub fn is_subset(&self, other: &Self) -> bool { + self.0.is_subset(other) + } + + /// Returns `true` if the set is a superset of another, + /// i.e., `self` contains at least all the values in `other`. + /// + /// Refer to [`is_superset`](hb::HashSet::is_superset) for further details. + #[inline] + pub fn is_superset(&self, other: &Self) -> bool { + self.0.is_superset(other) + } + + /// Adds a value to the set. + /// + /// Refer to [`insert`](hb::HashSet::insert) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert!(map.contains("foo")); + /// ``` + #[inline] + pub fn insert(&mut self, value: T) -> bool { + self.0.insert(value) + } + + /// Adds a value to the set, replacing the existing value, if any, that is equal to the given + /// one. Returns the replaced value. + /// + /// Refer to [`replace`](hb::HashSet::replace) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.replace("foo"), Some("foo")); + /// ``` + #[inline] + pub fn replace(&mut self, value: T) -> Option { + self.0.replace(value) + } + + /// Removes a value from the set. Returns whether the value was + /// present in the set. + /// + /// Refer to [`remove`](hb::HashSet::remove) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert!(map.remove("foo")); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn remove(&mut self, value: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.0.remove(value) + } + + /// Removes and returns the value in the set, if any, that is equal to the given one. + /// + /// Refer to [`take`](hb::HashSet::take) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.take("foo"), Some("foo")); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn take(&mut self, value: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + { + self.0.take(value) + } + + /// Returns the total amount of memory allocated internally by the hash + /// set, in bytes. + /// + /// Refer to [`allocation_size`](hb::HashSet::allocation_size) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.allocation_size(), 0); + /// + /// map.insert("foo"); + /// + /// assert!(map.allocation_size() >= size_of::<&'static str>()); + /// ``` + #[inline] + pub fn allocation_size(&self) -> usize { + self.0.allocation_size() + } + + /// Insert a value the set without checking if the value already exists in the set. + /// + /// Refer to [`insert_unique_unchecked`](hb::HashSet::insert_unique_unchecked) for further details. + /// + /// # Safety + /// + /// This operation is safe if a value does not exist in the set. + /// + /// However, if a value exists in the set already, the behavior is unspecified: + /// this operation may panic, loop forever, or any following operation with the set + /// may panic, loop forever or return arbitrary result. + /// + /// That said, this operation (and following operations) are guaranteed to + /// not violate memory safety. + /// + /// However this operation is still unsafe because the resulting `HashSet` + /// may be passed to unsafe code which does expect the set to behave + /// correctly, and would cause unsoundness as a result. + #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn insert_unique_unchecked(&mut self, value: T) -> &T { + // SAFETY: safety contract is ensured by the caller. + unsafe { self.0.insert_unique_unchecked(value) } + } +} + +impl BitOr<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: BitOr<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the union of `self` and `rhs` as a new `HashSet`. + #[inline] + fn bitor(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.bitor(&rhs.0)) + } +} + +impl BitAnd<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: BitAnd<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the intersection of `self` and `rhs` as a new `HashSet`. + #[inline] + fn bitand(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.bitand(&rhs.0)) + } +} + +impl BitXor<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: BitXor<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet`. 
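The wrapper above forwards hashbrown's set algebra through these `BitOr`/`BitAnd`/`BitXor`/`Sub` impls (the remaining operator impls follow just below), so it composes like `std::collections::HashSet`. A minimal usage sketch, assuming `bevy_platform` as a dependency with its default `FixedHasher`:

```rust
use bevy_platform::collections::HashSet;

fn main() {
    let mut a = HashSet::new();
    a.insert("foo");
    a.insert("bar");

    let mut b = HashSet::new();
    b.insert("bar");
    b.insert("baz");

    // Operator impls forward to hashbrown's set algebra.
    let union = &a | &b;
    let intersection = &a & &b;
    assert_eq!(union.len(), 3);
    assert!(intersection.contains("bar"));

    // Iterator-based views are forwarded as well.
    assert_eq!(a.difference(&b).count(), 1);
    assert_eq!(a.symmetric_difference(&b).count(), 2);
}
```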
+ #[inline] + fn bitxor(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.bitxor(&rhs.0)) + } +} + +impl Sub<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: Sub<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the difference of `self` and `rhs` as a new `HashSet`. + #[inline] + fn sub(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.sub(&rhs.0)) + } +} + +impl BitOrAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> BitOrAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the union of `self` and `rhs`. + #[inline] + fn bitor_assign(&mut self, rhs: &HashSet) { + self.0.bitor_assign(&rhs.0); + } +} + +impl BitAndAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> BitAndAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the intersection of `self` and `rhs`. + #[inline] + fn bitand_assign(&mut self, rhs: &HashSet) { + self.0.bitand_assign(&rhs.0); + } +} + +impl BitXorAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> BitXorAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the symmetric difference of `self` and `rhs`. + #[inline] + fn bitxor_assign(&mut self, rhs: &HashSet) { + self.0.bitxor_assign(&rhs.0); + } +} + +impl SubAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> SubAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the difference of `self` and `rhs`. + #[inline] + fn sub_assign(&mut self, rhs: &HashSet) { + self.0.sub_assign(&rhs.0); + } +} diff --git a/crates/bevy_platform/src/collections/hash_table.rs b/crates/bevy_platform/src/collections/hash_table.rs new file mode 100644 index 0000000000..5d6a265679 --- /dev/null +++ b/crates/bevy_platform/src/collections/hash_table.rs @@ -0,0 +1,6 @@ +//! Provides [`HashTable`] + +pub use hashbrown::hash_table::{ + AbsentEntry, Drain, Entry, ExtractIf, HashTable, IntoIter, Iter, IterHash, IterHashMut, + IterMut, OccupiedEntry, VacantEntry, +}; diff --git a/crates/bevy_platform/src/collections/mod.rs b/crates/bevy_platform/src/collections/mod.rs new file mode 100644 index 0000000000..3622165b65 --- /dev/null +++ b/crates/bevy_platform/src/collections/mod.rs @@ -0,0 +1,12 @@ +//! Provides [`HashMap`] and [`HashSet`] from [`hashbrown`] with some customized defaults. +//! +//! Also provides the [`HashTable`] type, which is specific to [`hashbrown`]. + +pub use hash_map::HashMap; +pub use hash_set::HashSet; +pub use hash_table::HashTable; +pub use hashbrown::Equivalent; + +pub mod hash_map; +pub mod hash_set; +pub mod hash_table; diff --git a/crates/bevy_platform_support/src/hash.rs b/crates/bevy_platform/src/hash.rs similarity index 100% rename from crates/bevy_platform_support/src/hash.rs rename to crates/bevy_platform/src/hash.rs diff --git a/crates/bevy_platform_support/src/lib.rs b/crates/bevy_platform/src/lib.rs similarity index 98% rename from crates/bevy_platform_support/src/lib.rs rename to crates/bevy_platform/src/lib.rs index eada254595..96f2f9a21c 100644 --- a/crates/bevy_platform_support/src/lib.rs +++ b/crates/bevy_platform/src/lib.rs @@ -17,6 +17,7 @@ extern crate alloc; pub mod hash; pub mod sync; +pub mod thread; pub mod time; #[cfg(feature = "alloc")] diff --git a/crates/bevy_platform/src/sync/atomic.rs b/crates/bevy_platform/src/sync/atomic.rs new file mode 100644 index 0000000000..65211482a6 --- /dev/null +++ b/crates/bevy_platform/src/sync/atomic.rs @@ -0,0 +1,43 @@ +//! Provides various atomic alternatives to language primitives. +//! +//! 
Certain platforms lack complete atomic support, requiring the use of a fallback +//! such as `portable-atomic`. +//! Using these types will ensure the correct atomic provider is used without the need for +//! feature gates in your own code. + +pub use atomic_16::{AtomicI16, AtomicU16}; +pub use atomic_32::{AtomicI32, AtomicU32}; +pub use atomic_64::{AtomicI64, AtomicU64}; +pub use atomic_8::{AtomicBool, AtomicI8, AtomicU8}; +pub use atomic_ptr::{AtomicIsize, AtomicPtr, AtomicUsize}; +pub use core::sync::atomic::Ordering; + +#[cfg(target_has_atomic = "8")] +use core::sync::atomic as atomic_8; + +#[cfg(not(target_has_atomic = "8"))] +use portable_atomic as atomic_8; + +#[cfg(target_has_atomic = "16")] +use core::sync::atomic as atomic_16; + +#[cfg(not(target_has_atomic = "16"))] +use portable_atomic as atomic_16; + +#[cfg(target_has_atomic = "32")] +use core::sync::atomic as atomic_32; + +#[cfg(not(target_has_atomic = "32"))] +use portable_atomic as atomic_32; + +#[cfg(target_has_atomic = "64")] +use core::sync::atomic as atomic_64; + +#[cfg(not(target_has_atomic = "64"))] +use portable_atomic as atomic_64; + +#[cfg(target_has_atomic = "ptr")] +use core::sync::atomic as atomic_ptr; + +#[cfg(not(target_has_atomic = "ptr"))] +use portable_atomic as atomic_ptr; diff --git a/crates/bevy_platform_support/src/sync/barrier.rs b/crates/bevy_platform/src/sync/barrier.rs similarity index 94% rename from crates/bevy_platform_support/src/sync/barrier.rs rename to crates/bevy_platform/src/sync/barrier.rs index 6c179d81d6..2968a78b01 100644 --- a/crates/bevy_platform_support/src/sync/barrier.rs +++ b/crates/bevy_platform/src/sync/barrier.rs @@ -1,12 +1,12 @@ //! Provides `Barrier` and `BarrierWaitResult` -pub use barrier::{Barrier, BarrierWaitResult}; +pub use implementation::{Barrier, BarrierWaitResult}; #[cfg(feature = "std")] -use std::sync as barrier; +use std::sync as implementation; #[cfg(not(feature = "std"))] -mod barrier { +mod implementation { use core::fmt; /// Fallback implementation of `Barrier` from the standard library. diff --git a/crates/bevy_platform_support/src/sync/lazy_lock.rs b/crates/bevy_platform/src/sync/lazy_lock.rs similarity index 57% rename from crates/bevy_platform_support/src/sync/lazy_lock.rs rename to crates/bevy_platform/src/sync/lazy_lock.rs index 8a13c1bef2..c756daeb94 100644 --- a/crates/bevy_platform_support/src/sync/lazy_lock.rs +++ b/crates/bevy_platform/src/sync/lazy_lock.rs @@ -1,11 +1,11 @@ //! 
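The new `sync/atomic.rs` module above picks `core::sync::atomic` whenever the target has native support for a given width and falls back to `portable-atomic` otherwise. A small sketch of relying on it, assuming `bevy_platform::sync::atomic` is the public re-export path for the module shown above:

```rust
use bevy_platform::sync::atomic::{AtomicU64, Ordering};

// Compiles unchanged on targets without native 64-bit atomics, where the
// `portable-atomic` provider is selected at build time.
static FRAME_COUNT: AtomicU64 = AtomicU64::new(0);

fn end_frame() -> u64 {
    FRAME_COUNT.fetch_add(1, Ordering::Relaxed)
}

fn main() {
    end_frame();
    assert_eq!(FRAME_COUNT.load(Ordering::Relaxed), 1);
}
```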
Provides `LazyLock` -pub use lazy_lock::LazyLock; +pub use implementation::LazyLock; #[cfg(feature = "std")] -use std::sync as lazy_lock; +use std::sync as implementation; #[cfg(not(feature = "std"))] -mod lazy_lock { +mod implementation { pub use spin::Lazy as LazyLock; } diff --git a/crates/bevy_platform_support/src/sync/mod.rs b/crates/bevy_platform/src/sync/mod.rs similarity index 85% rename from crates/bevy_platform_support/src/sync/mod.rs rename to crates/bevy_platform/src/sync/mod.rs index edb0217262..8fb7a2fbff 100644 --- a/crates/bevy_platform_support/src/sync/mod.rs +++ b/crates/bevy_platform/src/sync/mod.rs @@ -26,8 +26,8 @@ mod once; mod poison; mod rwlock; -#[cfg(all(feature = "alloc", feature = "portable-atomic"))] +#[cfg(all(feature = "alloc", not(target_has_atomic = "ptr")))] use portable_atomic_util as arc; -#[cfg(all(feature = "alloc", not(feature = "portable-atomic")))] +#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] use alloc::sync as arc; diff --git a/crates/bevy_platform_support/src/sync/mutex.rs b/crates/bevy_platform/src/sync/mutex.rs similarity index 95% rename from crates/bevy_platform_support/src/sync/mutex.rs rename to crates/bevy_platform/src/sync/mutex.rs index a059d670e9..7ff363f574 100644 --- a/crates/bevy_platform_support/src/sync/mutex.rs +++ b/crates/bevy_platform/src/sync/mutex.rs @@ -1,12 +1,12 @@ //! Provides `Mutex` and `MutexGuard` -pub use mutex::{Mutex, MutexGuard}; +pub use implementation::{Mutex, MutexGuard}; #[cfg(feature = "std")] -use std::sync as mutex; +use std::sync as implementation; #[cfg(not(feature = "std"))] -mod mutex { +mod implementation { use crate::sync::{LockResult, TryLockError, TryLockResult}; use core::fmt; @@ -81,7 +81,7 @@ mod mutex { } } - impl Default for Mutex { + impl Default for Mutex { fn default() -> Mutex { Mutex::new(Default::default()) } diff --git a/crates/bevy_platform_support/src/sync/once.rs b/crates/bevy_platform/src/sync/once.rs similarity index 97% rename from crates/bevy_platform_support/src/sync/once.rs rename to crates/bevy_platform/src/sync/once.rs index 2ae733f387..f4ac34b905 100644 --- a/crates/bevy_platform_support/src/sync/once.rs +++ b/crates/bevy_platform/src/sync/once.rs @@ -1,12 +1,12 @@ //! Provides `Once`, `OnceState`, `OnceLock` -pub use once::{Once, OnceLock, OnceState}; +pub use implementation::{Once, OnceLock, OnceState}; #[cfg(feature = "std")] -use std::sync as once; +use std::sync as implementation; #[cfg(not(feature = "std"))] -mod once { +mod implementation { use core::{ fmt, panic::{RefUnwindSafe, UnwindSafe}, @@ -145,6 +145,7 @@ mod once { /// Creates a new `Once` value. /// /// See the standard library for further details. + #[expect(clippy::new_without_default, reason = "matching std::sync::Once")] pub const fn new() -> Self { Self { inner: OnceLock::new(), diff --git a/crates/bevy_platform_support/src/sync/poison.rs b/crates/bevy_platform/src/sync/poison.rs similarity index 96% rename from crates/bevy_platform_support/src/sync/poison.rs rename to crates/bevy_platform/src/sync/poison.rs index 0aa8e168c2..79eafc4250 100644 --- a/crates/bevy_platform_support/src/sync/poison.rs +++ b/crates/bevy_platform/src/sync/poison.rs @@ -1,12 +1,12 @@ //! 
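These `sync` modules keep the `std::sync` API surface and swap in `spin`-based fallbacks on `no_std` builds, so call sites look identical either way. A small sketch, assuming the types are re-exported from `bevy_platform::sync` as the `pub use implementation::…` lines above suggest:

```rust
use bevy_platform::sync::{LazyLock, Mutex};

// On `std` builds these are the std::sync types; on `no_std` builds they are
// the spin-based fallbacks, with the same call-site API.
static REGISTRY: LazyLock<Mutex<Vec<&'static str>>> =
    LazyLock::new(|| Mutex::new(Vec::new()));

fn register(name: &'static str) {
    REGISTRY.lock().unwrap().push(name);
}

fn main() {
    register("renderer");
    assert_eq!(REGISTRY.lock().unwrap().len(), 1);
}
```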
Provides `LockResult`, `PoisonError`, `TryLockError`, `TryLockResult` -pub use poison::{LockResult, PoisonError, TryLockError, TryLockResult}; +pub use implementation::{LockResult, PoisonError, TryLockError, TryLockResult}; #[cfg(feature = "std")] -use std::sync as poison; +use std::sync as implementation; #[cfg(not(feature = "std"))] -mod poison { +mod implementation { use core::{error::Error, fmt}; /// Fallback implementation of `PoisonError` from the standard library. diff --git a/crates/bevy_platform_support/src/sync/rwlock.rs b/crates/bevy_platform/src/sync/rwlock.rs similarity index 95% rename from crates/bevy_platform_support/src/sync/rwlock.rs rename to crates/bevy_platform/src/sync/rwlock.rs index 627da73f32..f1f529baaf 100644 --- a/crates/bevy_platform_support/src/sync/rwlock.rs +++ b/crates/bevy_platform/src/sync/rwlock.rs @@ -1,12 +1,12 @@ -//! TODO: Implement `RwLock`, `RwLockReadGuard`, `RwLockWriteGuard` +//! Provides `RwLock`, `RwLockReadGuard`, `RwLockWriteGuard` -pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +pub use implementation::{RwLock, RwLockReadGuard, RwLockWriteGuard}; #[cfg(feature = "std")] -use std::sync as rwlock; +use std::sync as implementation; #[cfg(not(feature = "std"))] -mod rwlock { +mod implementation { use crate::sync::{LockResult, TryLockError, TryLockResult}; use core::fmt; diff --git a/crates/bevy_platform/src/thread.rs b/crates/bevy_platform/src/thread.rs new file mode 100644 index 0000000000..e1d593c90b --- /dev/null +++ b/crates/bevy_platform/src/thread.rs @@ -0,0 +1,29 @@ +//! Provides `sleep` for all platforms. + +pub use thread::sleep; + +cfg_if::cfg_if! { + // TODO: use browser timeouts based on ScheduleRunnerPlugin::build + if #[cfg(feature = "std")] { + use std::thread; + } else { + mod fallback { + use core::{hint::spin_loop, time::Duration}; + + use crate::time::Instant; + + /// Puts the current thread to sleep for at least the specified amount of time. + /// + /// As this is a `no_std` fallback implementation, this will spin the current thread. + pub fn sleep(dur: Duration) { + let start = Instant::now(); + + while start.elapsed() < dur { + spin_loop() + } + } + } + + use fallback as thread; + } +} diff --git a/crates/bevy_platform/src/time/fallback.rs b/crates/bevy_platform/src/time/fallback.rs new file mode 100644 index 0000000000..c438e6e379 --- /dev/null +++ b/crates/bevy_platform/src/time/fallback.rs @@ -0,0 +1,177 @@ +//! Provides a fallback implementation of `Instant` from the standard library. + +#![expect( + unsafe_code, + reason = "Instant fallback requires unsafe to allow users to update the internal value" +)] + +use crate::sync::atomic::{AtomicPtr, Ordering}; + +use core::{ + fmt, + ops::{Add, AddAssign, Sub, SubAssign}, + time::Duration, +}; + +static ELAPSED_GETTER: AtomicPtr<()> = AtomicPtr::new(unset_getter as *mut _); + +/// Fallback implementation of `Instant` suitable for a `no_std` environment. +/// +/// If you are on any of the following target architectures, this is a drop-in replacement: +/// +/// - `x86` +/// - `x86_64` +/// - `aarch64` +/// +/// On any other architecture, you must call [`Instant::set_elapsed`], providing a method +/// which when called supplies a monotonically increasing count of elapsed nanoseconds relative +/// to some arbitrary point in time. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Instant(Duration); + +impl Instant { + /// Returns an instant corresponding to "now". 
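The `thread.rs` module added above delegates to `std::thread::sleep` when the `std` feature is enabled and otherwise spins on the fallback `Instant`, so the call looks the same on every platform. A small sketch combining it with `bevy_platform::time::Instant`:

```rust
use bevy_platform::{thread, time::Instant};
use core::time::Duration;

fn main() {
    let start = Instant::now();

    // Delegates to std::thread::sleep with the `std` feature,
    // and to the spin-loop fallback without it.
    thread::sleep(Duration::from_millis(5));

    assert!(start.elapsed() >= Duration::from_millis(5));
}
```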
+ #[must_use] + pub fn now() -> Instant { + let getter = ELAPSED_GETTER.load(Ordering::Acquire); + + // SAFETY: Function pointer is always valid + let getter = unsafe { core::mem::transmute::<*mut (), fn() -> Duration>(getter) }; + + Self((getter)()) + } + + /// Provides a function returning the amount of time that has elapsed since execution began. + /// The getter provided to this method will be used by [`now`](Instant::now). + /// + /// # Safety + /// + /// - The function provided must accurately represent the elapsed time. + /// - The function must preserve all invariants of the [`Instant`] type. + /// - The pointer to the function must be valid whenever [`Instant::now`] is called. + pub unsafe fn set_elapsed(getter: fn() -> Duration) { + ELAPSED_GETTER.store(getter as *mut _, Ordering::Release); + } + + /// Returns the amount of time elapsed from another instant to this one, + /// or zero duration if that instant is later than this one. + #[must_use] + pub fn duration_since(&self, earlier: Instant) -> Duration { + self.saturating_duration_since(earlier) + } + + /// Returns the amount of time elapsed from another instant to this one, + /// or None if that instant is later than this one. + /// + /// Due to monotonicity bugs, even under correct logical ordering of the passed `Instant`s, + /// this method can return `None`. + #[must_use] + pub fn checked_duration_since(&self, earlier: Instant) -> Option { + self.0.checked_sub(earlier.0) + } + + /// Returns the amount of time elapsed from another instant to this one, + /// or zero duration if that instant is later than this one. + #[must_use] + pub fn saturating_duration_since(&self, earlier: Instant) -> Duration { + self.0.saturating_sub(earlier.0) + } + + /// Returns the amount of time elapsed since this instant. + #[must_use] + pub fn elapsed(&self) -> Duration { + Instant::now().saturating_duration_since(*self) + } + + /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as + /// `Instant` (which means it's inside the bounds of the underlying data structure), `None` + /// otherwise. + pub fn checked_add(&self, duration: Duration) -> Option { + self.0.checked_add(duration).map(Instant) + } + + /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as + /// `Instant` (which means it's inside the bounds of the underlying data structure), `None` + /// otherwise. + pub fn checked_sub(&self, duration: Duration) -> Option { + self.0.checked_sub(duration).map(Instant) + } +} + +impl Add for Instant { + type Output = Instant; + + /// # Panics + /// + /// This function may panic if the resulting point in time cannot be represented by the + /// underlying data structure. See [`Instant::checked_add`] for a version without panic. + fn add(self, other: Duration) -> Instant { + self.checked_add(other) + .expect("overflow when adding duration to instant") + } +} + +impl AddAssign for Instant { + fn add_assign(&mut self, other: Duration) { + *self = *self + other; + } +} + +impl Sub for Instant { + type Output = Instant; + + fn sub(self, other: Duration) -> Instant { + self.checked_sub(other) + .expect("overflow when subtracting duration from instant") + } +} + +impl SubAssign for Instant { + fn sub_assign(&mut self, other: Duration) { + *self = *self - other; + } +} + +impl Sub for Instant { + type Output = Duration; + + /// Returns the amount of time elapsed from another instant to this one, + /// or zero duration if that instant is later than this one. 
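On architectures other than `x86`, `x86_64`, and `aarch64`, this fallback has no default time source, so `Instant::set_elapsed` must be called before the first `Instant::now()`. A sketch for a hypothetical embedded target; the timer-reading function is a made-up placeholder, and this only applies when the crate is built without the `std` and `web` features so the fallback above is the `Instant` in use:

```rust
use bevy_platform::time::Instant;
use core::time::Duration;

// Placeholder for a board-specific monotonic counter (illustration only).
fn ticks_since_boot_ns() -> u64 {
    0
}

fn elapsed_since_boot() -> Duration {
    Duration::from_nanos(ticks_since_boot_ns())
}

fn init_platform_time() {
    // SAFETY: the getter reports a monotonically non-decreasing duration
    // for the whole lifetime of the program.
    unsafe { Instant::set_elapsed(elapsed_since_boot) };

    // From here on, `Instant::now()` uses the registered getter.
    let _start = Instant::now();
}
```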
+ fn sub(self, other: Instant) -> Duration { + self.duration_since(other) + } +} + +impl fmt::Debug for Instant { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +fn unset_getter() -> Duration { + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86")] { + // SAFETY: standard technique for getting a nanosecond counter on x86 + let nanos = unsafe { + core::arch::x86::_rdtsc() + }; + Duration::from_nanos(nanos) + } else if #[cfg(target_arch = "x86_64")] { + // SAFETY: standard technique for getting a nanosecond counter on x86_64 + let nanos = unsafe { + core::arch::x86_64::_rdtsc() + }; + Duration::from_nanos(nanos) + } else if #[cfg(target_arch = "aarch64")] { + // SAFETY: standard technique for getting a nanosecond counter of aarch64 + let nanos = unsafe { + let mut ticks: u64; + core::arch::asm!("mrs {}, cntvct_el0", out(reg) ticks); + ticks + }; + Duration::from_nanos(nanos) + } else { + panic!("An elapsed time getter has not been provided to `Instant`. Please use `Instant::set_elapsed(...)` before calling `Instant::now()`") + } + } +} diff --git a/crates/bevy_platform/src/time/mod.rs b/crates/bevy_platform/src/time/mod.rs new file mode 100644 index 0000000000..260d8e4aea --- /dev/null +++ b/crates/bevy_platform/src/time/mod.rs @@ -0,0 +1,15 @@ +//! Provides `Instant` for all platforms. + +pub use time::Instant; + +cfg_if::cfg_if! { + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + use web_time as time; + } else if #[cfg(feature = "std")] { + use std::time; + } else { + mod fallback; + + use fallback as time; + } +} diff --git a/crates/bevy_platform_support/src/collections.rs b/crates/bevy_platform_support/src/collections.rs deleted file mode 100644 index 8a7fef6116..0000000000 --- a/crates/bevy_platform_support/src/collections.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! Provides [`HashMap`] and [`HashSet`] from [`hashbrown`] with some customized defaults.\ -//! -//! Also provides the [`HashTable`] type, which is specific to [`hashbrown`]. - -pub use hash_map::HashMap; -pub use hash_set::HashSet; -pub use hash_table::HashTable; -pub use hashbrown::Equivalent; - -pub mod hash_map { - //! Provides [`HashMap`] - - use crate::hash::FixedHasher; - use hashbrown::hash_map as hb; - - // Re-exports to match `std::collections::hash_map` - pub use { - crate::hash::{DefaultHasher, RandomState}, - hb::{ - Drain, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, OccupiedEntry, VacantEntry, - Values, ValuesMut, - }, - }; - - // Additional items from `hashbrown` - pub use hb::{ - EntryRef, ExtractIf, OccupiedError, RawEntryBuilder, RawEntryBuilderMut, RawEntryMut, - RawOccupiedEntryMut, - }; - - /// Shortcut for [`HashMap`](hb::HashMap) with [`FixedHasher`] as the default hashing provider. - pub type HashMap = hb::HashMap; - - /// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. - pub type Entry<'a, K, V, S = FixedHasher> = hb::Entry<'a, K, V, S>; -} - -pub mod hash_set { - //! Provides [`HashSet`] - - use crate::hash::FixedHasher; - use hashbrown::hash_set as hb; - - // Re-exports to match `std::collections::hash_set` - pub use hb::{Difference, Drain, Intersection, IntoIter, Iter, SymmetricDifference, Union}; - - // Additional items from `hashbrown` - pub use hb::{ExtractIf, OccupiedEntry, VacantEntry}; - - /// Shortcut for [`HashSet`](hb::HashSet) with [`FixedHasher`] as the default hashing provider. 
- pub type HashSet = hb::HashSet; - - /// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. - pub type Entry<'a, T, S = FixedHasher> = hb::Entry<'a, T, S>; -} - -pub mod hash_table { - //! Provides [`HashTable`] - - pub use hashbrown::hash_table::{ - AbsentEntry, Drain, Entry, ExtractIf, HashTable, IntoIter, Iter, IterHash, IterHashMut, - IterMut, OccupiedEntry, VacantEntry, - }; -} diff --git a/crates/bevy_platform_support/src/sync/atomic.rs b/crates/bevy_platform_support/src/sync/atomic.rs deleted file mode 100644 index 9e8eadb3df..0000000000 --- a/crates/bevy_platform_support/src/sync/atomic.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Provides various atomic alternatives to language primitives. -//! -//! Certain platforms lack complete atomic support, requiring the use of a fallback -//! such as `portable-atomic`. -//! Using these types will ensure the correct atomic provider is used without the need for -//! feature gates in your own code. - -pub use atomic::{ - AtomicBool, AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, - AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering, -}; - -#[cfg(not(feature = "portable-atomic"))] -use core::sync::atomic; - -#[cfg(feature = "portable-atomic")] -use portable_atomic as atomic; diff --git a/crates/bevy_platform_support/src/time.rs b/crates/bevy_platform_support/src/time.rs deleted file mode 100644 index 79fc81c7fe..0000000000 --- a/crates/bevy_platform_support/src/time.rs +++ /dev/null @@ -1,194 +0,0 @@ -//! Provides `Instant` for all platforms. - -pub use time::Instant; - -// TODO: Create a `web` feature to enable WASI compatibility. -// See https://github.com/bevyengine/bevy/issues/4906 -#[cfg(target_arch = "wasm32")] -use web_time as time; - -#[cfg(all(not(target_arch = "wasm32"), feature = "std"))] -use std::time; - -#[cfg(all(not(target_arch = "wasm32"), not(feature = "std")))] -use fallback as time; - -#[cfg(all(not(target_arch = "wasm32"), not(feature = "std")))] -mod fallback { - //! Provides a fallback implementation of `Instant` from the standard library. - - #![expect( - unsafe_code, - reason = "Instant fallback requires unsafe to allow users to update the internal value" - )] - - use crate::sync::atomic::{AtomicPtr, Ordering}; - - use core::{ - fmt, - ops::{Add, AddAssign, Sub, SubAssign}, - time::Duration, - }; - - static ELAPSED_GETTER: AtomicPtr<()> = AtomicPtr::new(unset_getter as *mut _); - - /// Fallback implementation of `Instant` suitable for a `no_std` environment. - /// - /// If you are on any of the following target architectures, this is a drop-in replacement: - /// - /// - `x86` - /// - `x86_64` - /// - `aarch64` - /// - /// On any other architecture, you must call [`Instant::set_elapsed`], providing a method - /// which when called supplies a monotonically increasing count of elapsed nanoseconds relative - /// to some arbitrary point in time. - #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct Instant(Duration); - - impl Instant { - /// Returns an instant corresponding to "now". - #[must_use] - pub fn now() -> Instant { - let getter = ELAPSED_GETTER.load(Ordering::Acquire); - - // SAFETY: Function pointer is always valid - let getter = unsafe { core::mem::transmute::<_, fn() -> Duration>(getter) }; - - Self((getter)()) - } - - /// Provides a function returning the amount of time that has elapsed since execution began. - /// The getter provided to this method will be used by [`now`](Instant::now). 
- /// - /// # Safety - /// - /// - The function provided must accurately represent the elapsed time. - /// - The function must preserve all invariants of the [`Instant`] type. - /// - The pointer to the function must be valid whenever [`Instant::now`] is called. - pub unsafe fn set_elapsed(getter: fn() -> Duration) { - ELAPSED_GETTER.store(getter as *mut _, Ordering::Release); - } - - /// Returns the amount of time elapsed from another instant to this one, - /// or zero duration if that instant is later than this one. - #[must_use] - pub fn duration_since(&self, earlier: Instant) -> Duration { - self.saturating_duration_since(earlier) - } - - /// Returns the amount of time elapsed from another instant to this one, - /// or None if that instant is later than this one. - /// - /// Due to monotonicity bugs, even under correct logical ordering of the passed `Instant`s, - /// this method can return `None`. - #[must_use] - pub fn checked_duration_since(&self, earlier: Instant) -> Option { - self.0.checked_sub(earlier.0) - } - - /// Returns the amount of time elapsed from another instant to this one, - /// or zero duration if that instant is later than this one. - #[must_use] - pub fn saturating_duration_since(&self, earlier: Instant) -> Duration { - self.0.saturating_sub(earlier.0) - } - - /// Returns the amount of time elapsed since this instant. - #[must_use] - pub fn elapsed(&self) -> Duration { - self.saturating_duration_since(Instant::now()) - } - - /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as - /// `Instant` (which means it's inside the bounds of the underlying data structure), `None` - /// otherwise. - pub fn checked_add(&self, duration: Duration) -> Option { - self.0.checked_add(duration).map(Instant) - } - - /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as - /// `Instant` (which means it's inside the bounds of the underlying data structure), `None` - /// otherwise. - pub fn checked_sub(&self, duration: Duration) -> Option { - self.0.checked_sub(duration).map(Instant) - } - } - - impl Add for Instant { - type Output = Instant; - - /// # Panics - /// - /// This function may panic if the resulting point in time cannot be represented by the - /// underlying data structure. See [`Instant::checked_add`] for a version without panic. - fn add(self, other: Duration) -> Instant { - self.checked_add(other) - .expect("overflow when adding duration to instant") - } - } - - impl AddAssign for Instant { - fn add_assign(&mut self, other: Duration) { - *self = *self + other; - } - } - - impl Sub for Instant { - type Output = Instant; - - fn sub(self, other: Duration) -> Instant { - self.checked_sub(other) - .expect("overflow when subtracting duration from instant") - } - } - - impl SubAssign for Instant { - fn sub_assign(&mut self, other: Duration) { - *self = *self - other; - } - } - - impl Sub for Instant { - type Output = Duration; - - /// Returns the amount of time elapsed from another instant to this one, - /// or zero duration if that instant is later than this one. 
- fn sub(self, other: Instant) -> Duration { - self.duration_since(other) - } - } - - impl fmt::Debug for Instant { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } - } - - fn unset_getter() -> Duration { - let _nanos: u64; - - #[cfg(target_arch = "x86")] - unsafe { - _nanos = core::arch::x86::_rdtsc(); - } - - #[cfg(target_arch = "x86_64")] - unsafe { - _nanos = core::arch::x86_64::_rdtsc(); - } - - #[cfg(target_arch = "aarch64")] - unsafe { - let mut ticks: u64; - core::arch::asm!("mrs {}, cntvct_el0", out(reg) ticks); - _nanos = ticks; - } - - #[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))] - panic!("An elapsed time getter has not been provided to `Instant`. Please use `Instant::set_elapsed(...)` before calling `Instant::now()`"); - - #[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))] - return Duration::from_nanos(_nanos); - } -} diff --git a/crates/bevy_ptr/Cargo.toml b/crates/bevy_ptr/Cargo.toml index d2c3db4fbc..0f56880bd4 100644 --- a/crates/bevy_ptr/Cargo.toml +++ b/crates/bevy_ptr/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "bevy_ptr" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Utilities for working with untyped pointers in a more safe way" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy", "no_std"] -rust-version = "1.81.0" +rust-version = "1.85.0" [dependencies] diff --git a/crates/bevy_ptr/src/lib.rs b/crates/bevy_ptr/src/lib.rs index adb72409f9..1580f3f926 100644 --- a/crates/bevy_ptr/src/lib.rs +++ b/crates/bevy_ptr/src/lib.rs @@ -9,7 +9,7 @@ use core::{ cell::UnsafeCell, - fmt::{self, Formatter, Pointer}, + fmt::{self, Debug, Formatter, Pointer}, marker::PhantomData, mem::ManuallyDrop, num::NonZeroUsize, @@ -17,11 +17,11 @@ use core::{ }; /// Used as a type argument to [`Ptr`], [`PtrMut`] and [`OwningPtr`] to specify that the pointer is aligned. -#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub struct Aligned; /// Used as a type argument to [`Ptr`], [`PtrMut`] and [`OwningPtr`] to specify that the pointer is not aligned. -#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub struct Unaligned; /// Trait that is only implemented for [`Aligned`] and [`Unaligned`] to work around the lack of ability @@ -159,7 +159,7 @@ impl<'a, T: ?Sized> From<&'a mut T> for ConstNonNull { /// /// It may be helpful to think of this type as similar to `&'a dyn Any` but without /// the metadata and able to point to data that does not correspond to a Rust type. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone)] #[repr(transparent)] pub struct Ptr<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a u8, A)>); @@ -174,7 +174,6 @@ pub struct Ptr<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a u8, A)> /// /// It may be helpful to think of this type as similar to `&'a mut dyn Any` but without /// the metadata and able to point to data that does not correspond to a Rust type. -#[derive(Debug)] #[repr(transparent)] pub struct PtrMut<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a mut u8, A)>); @@ -194,7 +193,6 @@ pub struct PtrMut<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a mut /// /// It may be helpful to think of this type as similar to `&'a mut ManuallyDrop` but /// without the metadata and able to point to data that does not correspond to a Rust type. 
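The `bevy_ptr` changes below replace the derived `Debug` impls with manual ones that name the wrapper in the output, and `OwningPtr::make` now keeps its `ManuallyDrop` alive in a local binding. A usage sketch of the unchanged public API:

```rust
use bevy_ptr::OwningPtr;

fn main() {
    // `make` moves the value into a ManuallyDrop and lends out an OwningPtr.
    let len = OwningPtr::make(String::from("hello"), |ptr| {
        // With the manual impls this prints something like `OwningPtr(0x7ffc…)`.
        println!("{ptr:?}");

        // SAFETY: the pointer was produced from a `String` and is consumed exactly once.
        let value: String = unsafe { ptr.read() };
        value.len()
    });

    assert_eq!(len, 5);
}
```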
-#[derive(Debug)] #[repr(transparent)] pub struct OwningPtr<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a mut u8, A)>); @@ -265,6 +263,19 @@ macro_rules! impl_ptr { Pointer::fmt(&self.0, f) } } + + impl Debug for $ptr<'_, Aligned> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}({:?})", stringify!($ptr), self.0) + } + } + impl Debug for $ptr<'_, Unaligned> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}({:?})", stringify!($ptr), self.0) + } + } }; } @@ -413,9 +424,10 @@ impl<'a> OwningPtr<'a> { /// Consumes a value and creates an [`OwningPtr`] to it while ensuring a double drop does not happen. #[inline] pub fn make) -> R, R>(val: T, f: F) -> R { + let mut val = ManuallyDrop::new(val); // SAFETY: The value behind the pointer will not get dropped or observed later, // so it's safe to promote it to an owning pointer. - f(unsafe { Self::make_internal(&mut ManuallyDrop::new(val)) }) + f(unsafe { Self::make_internal(&mut val) }) } } diff --git a/crates/bevy_reflect/Cargo.toml b/crates/bevy_reflect/Cargo.toml index c15784d22d..bf85258700 100644 --- a/crates/bevy_reflect/Cargo.toml +++ b/crates/bevy_reflect/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "bevy_reflect" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Dynamically interact with rust types" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy"] -rust-version = "1.81.0" +rust-version = "1.85.0" [features] default = ["std", "smallvec", "debug"] @@ -20,16 +20,13 @@ documentation = ["bevy_reflect_derive/documentation"] ## Enables function reflection functions = ["bevy_reflect_derive/functions"] -# When enabled, provides Bevy-related reflection implementations -bevy = ["smallvec", "smol_str"] - # Debugging Features ## Enables features useful for debugging reflection debug = ["debug_stack"] ## When enabled, keeps track of the current serialization/deserialization context for better error messages -debug_stack = [] +debug_stack = ["std"] # Integrations @@ -46,7 +43,7 @@ smallvec = ["dep:smallvec"] uuid = ["dep:uuid"] ## Adds reflection support to `wgpu-types` types. -wgpu-types = ["dep:wgpu-types", "std"] +wgpu-types = ["dep:wgpu-types"] # Platform Compatibility @@ -61,22 +58,20 @@ std = [ "glam?/std", "smol_str?/std", "uuid?/std", - "bevy_platform_support/std", + "bevy_platform/std", + "wgpu-types?/std", ] ## `critical-section` provides the building blocks for synchronization primitives ## on all platforms, including `no_std`. critical-section = [ - "bevy_platform_support/critical-section", + "bevy_platform/critical-section", "bevy_utils/critical-section", ] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "bevy_platform_support/portable-atomic", - "bevy_utils/portable-atomic", -] +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. 
+web = ["bevy_platform/web", "uuid?/js"] [dependencies] # bevy @@ -85,7 +80,7 @@ bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features "alloc", ] } bevy_ptr = { path = "../bevy_ptr", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "alloc", "serialize", ] } @@ -104,10 +99,10 @@ derive_more = { version = "1", default-features = false, features = ["from"] } serde = { version = "1", default-features = false, features = ["alloc"] } assert_type_match = "0.1.1" smallvec = { version = "1.11", default-features = false, optional = true } -glam = { version = "0.29", default-features = false, features = [ +glam = { version = "0.29.3", default-features = false, features = [ "serde", ], optional = true } -petgraph = { version = "0.6", features = ["serde-1"], optional = true } +petgraph = { version = "0.7", features = ["serde-1"], optional = true } smol_str = { version = "0.2.0", default-features = false, features = [ "serde", ], optional = true } @@ -116,15 +111,14 @@ uuid = { version = "1.13.1", default-features = false, optional = true, features "serde", ] } variadics_please = "1.1" -wgpu-types = { version = "24", features = ["serde"], optional = true } - -[target.'cfg(target_arch = "wasm32")'.dependencies] -uuid = { version = "1.13.1", default-features = false, features = ["js"] } +wgpu-types = { version = "24", features = [ + "serde", +], optional = true, default-features = false } [dev-dependencies] ron = "0.8.0" rmp-serde = "1.1" -bincode = "1.3" +bincode = { version = "2.0", features = ["serde"] } serde_json = "1.0" serde = { version = "1", features = ["derive"] } static_assertions = "1.1.0" diff --git a/crates/bevy_reflect/compile_fail/Cargo.toml b/crates/bevy_reflect/compile_fail/Cargo.toml index 14e5eb2264..178711c5d0 100644 --- a/crates/bevy_reflect/compile_fail/Cargo.toml +++ b/crates/bevy_reflect/compile_fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bevy_reflect_compile_fail" -edition = "2021" +edition = "2024" description = "Compile fail tests for Bevy Engine's reflection system" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs b/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs index 0f8ade8e23..457f1f75e5 100644 --- a/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs +++ b/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs @@ -26,6 +26,7 @@ mod incorrect_inner_type { //~| ERROR: `TheirInner` does not implement `PartialReflect` so cannot be introspected //~| ERROR: `TheirInner` does not implement `PartialReflect` so cannot be introspected //~| ERROR: `TheirInner` does not implement `TypePath` so cannot provide dynamic type path information + //~| ERROR: `TheirInner` does not implement `TypePath` so cannot provide dynamic type path information //~| ERROR: `?` operator has incompatible types struct MyOuter { // Reason: Should not use `MyInner` directly diff --git a/crates/bevy_reflect/derive/Cargo.toml b/crates/bevy_reflect/derive/Cargo.toml index bbdca03ca7..ad6ec8cd2f 100644 --- a/crates/bevy_reflect/derive/Cargo.toml +++ b/crates/bevy_reflect/derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_reflect_derive" version = "0.16.0-dev" -edition = "2021" +edition = "2024" 
description = "Derive implementations for bevy_reflect" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -26,6 +26,7 @@ syn = { version = "2.0", features = ["full"] } uuid = { version = "1.13.1", features = ["v4"] } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. uuid = { version = "1.13.1", default-features = false, features = ["js"] } [lints] diff --git a/crates/bevy_reflect/derive/src/container_attributes.rs b/crates/bevy_reflect/derive/src/container_attributes.rs index bdb94db06b..8b2ae0351f 100644 --- a/crates/bevy_reflect/derive/src/container_attributes.rs +++ b/crates/bevy_reflect/derive/src/container_attributes.rs @@ -9,7 +9,7 @@ use crate::{ attribute_parser::terminated_parser, custom_attributes::CustomAttributes, derive_data::ReflectTraitToImpl, }; -use bevy_macro_utils::fq_std::{FQAny, FQOption}; +use bevy_macro_utils::fq_std::{FQAny, FQClone, FQOption, FQResult}; use proc_macro2::{Ident, Span}; use quote::quote_spanned; use syn::{ @@ -23,6 +23,7 @@ mod kw { syn::custom_keyword!(Debug); syn::custom_keyword!(PartialEq); syn::custom_keyword!(Hash); + syn::custom_keyword!(Clone); syn::custom_keyword!(no_field_bounds); syn::custom_keyword!(opaque); } @@ -175,6 +176,7 @@ impl TypePathAttrs { /// > __Note:__ Registering a custom function only works for special traits. #[derive(Default, Clone)] pub(crate) struct ContainerAttributes { + clone: TraitImpl, debug: TraitImpl, hash: TraitImpl, partial_eq: TraitImpl, @@ -236,12 +238,14 @@ impl ContainerAttributes { self.parse_opaque(input) } else if lookahead.peek(kw::no_field_bounds) { self.parse_no_field_bounds(input) + } else if lookahead.peek(kw::Clone) { + self.parse_clone(input) } else if lookahead.peek(kw::Debug) { self.parse_debug(input) - } else if lookahead.peek(kw::PartialEq) { - self.parse_partial_eq(input) } else if lookahead.peek(kw::Hash) { self.parse_hash(input) + } else if lookahead.peek(kw::PartialEq) { + self.parse_partial_eq(input) } else if lookahead.peek(Ident::peek_any) { self.parse_ident(input) } else { @@ -274,6 +278,26 @@ impl ContainerAttributes { Ok(()) } + /// Parse `clone` attribute. + /// + /// Examples: + /// - `#[reflect(Clone)]` + /// - `#[reflect(Clone(custom_clone_fn))]` + fn parse_clone(&mut self, input: ParseStream) -> syn::Result<()> { + let ident = input.parse::()?; + + if input.peek(token::Paren) { + let content; + parenthesized!(content in input); + let path = content.parse::()?; + self.clone.merge(TraitImpl::Custom(path, ident.span))?; + } else { + self.clone = TraitImpl::Implemented(ident.span); + } + + Ok(()) + } + /// Parse special `Debug` registration. /// /// Examples: @@ -377,9 +401,11 @@ impl ContainerAttributes { // Override `lit` if this is a `FromReflect` derive. // This typically means a user is opting out of the default implementation // from the `Reflect` derive and using the `FromReflect` derive directly instead. - (trait_ == ReflectTraitToImpl::FromReflect) - .then(|| LitBool::new(true, Span::call_site())) - .unwrap_or_else(|| lit.clone()) + if trait_ == ReflectTraitToImpl::FromReflect { + LitBool::new(true, Span::call_site()) + } else { + lit.clone() + } })?; if let Some(existing) = &self.from_reflect_attrs.auto_derive { @@ -410,9 +436,11 @@ impl ContainerAttributes { // Override `lit` if this is a `FromReflect` derive. 
// This typically means a user is opting out of the default implementation // from the `Reflect` derive and using the `FromReflect` derive directly instead. - (trait_ == ReflectTraitToImpl::TypePath) - .then(|| LitBool::new(true, Span::call_site())) - .unwrap_or_else(|| lit.clone()) + if trait_ == ReflectTraitToImpl::TypePath { + LitBool::new(true, Span::call_site()) + } else { + lit.clone() + } })?; if let Some(existing) = &self.type_path_attrs.auto_derive { @@ -523,6 +551,24 @@ impl ContainerAttributes { } } + pub fn get_clone_impl(&self, bevy_reflect_path: &Path) -> Option { + match &self.clone { + &TraitImpl::Implemented(span) => Some(quote_spanned! {span=> + #[inline] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#FQClone::clone(self))) + } + }), + &TraitImpl::Custom(ref impl_fn, span) => Some(quote_spanned! {span=> + #[inline] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#impl_fn(self))) + } + }), + TraitImpl::NotImplemented => None, + } + } + pub fn custom_attributes(&self) -> &CustomAttributes { &self.custom_attributes } diff --git a/crates/bevy_reflect/derive/src/derive_data.rs b/crates/bevy_reflect/derive/src/derive_data.rs index e739c91ebb..f825cb2905 100644 --- a/crates/bevy_reflect/derive/src/derive_data.rs +++ b/crates/bevy_reflect/derive/src/derive_data.rs @@ -12,13 +12,17 @@ use crate::{ where_clause_options::WhereClauseOptions, REFLECT_ATTRIBUTE_NAME, TYPE_NAME_ATTRIBUTE_NAME, TYPE_PATH_ATTRIBUTE_NAME, }; -use quote::{quote, ToTokens}; +use quote::{format_ident, quote, ToTokens}; use syn::token::Comma; +use crate::enum_utility::{EnumVariantOutputData, ReflectCloneVariantBuilder, VariantBuilder}; +use crate::field_attributes::CloneBehavior; use crate::generics::generate_generics; +use bevy_macro_utils::fq_std::{FQClone, FQOption, FQResult}; use syn::{ parse_str, punctuated::Punctuated, spanned::Spanned, Data, DeriveInput, Field, Fields, - GenericParam, Generics, Ident, LitStr, Meta, Path, PathSegment, Type, TypeParam, Variant, + GenericParam, Generics, Ident, LitStr, Member, Meta, Path, PathSegment, Type, TypeParam, + Variant, }; pub(crate) enum ReflectDerive<'a> { @@ -266,7 +270,7 @@ impl<'a> ReflectDerive<'a> { { return Err(syn::Error::new( meta.type_path().span(), - format!("a #[{TYPE_PATH_ATTRIBUTE_NAME} = \"...\"] attribute must be specified when using {provenance}") + format!("a #[{TYPE_PATH_ATTRIBUTE_NAME} = \"...\"] attribute must be specified when using {provenance}"), )); } @@ -546,6 +550,31 @@ impl<'a> StructField<'a> { pub fn attrs(&self) -> &FieldAttributes { &self.attrs } + + /// Generates a [`Member`] based on this field. + /// + /// If the field is unnamed, the declaration index is used. + /// This allows this member to be used for both active and ignored fields. + pub fn to_member(&self) -> Member { + match &self.data.ident { + Some(ident) => Member::Named(ident.clone()), + None => Member::Unnamed(self.declaration_index.into()), + } + } + + /// Returns a token stream for generating a `FieldId` for this field. 
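The new `Clone` container attribute parsed above lets a type's `reflect_clone` delegate to its existing `Clone` impl (or to a custom function via `#[reflect(Clone(path::to::func))]`) instead of cloning field by field. A sketch of how a user of the derive would opt in, assuming the `reflect_clone`/`downcast_ref` API introduced elsewhere in this PR:

```rust
use bevy_reflect::{PartialReflect, Reflect};

// `reflect_clone` will call `Clone::clone` directly instead of
// reconstructing the struct field by field.
#[derive(Reflect, Clone, Debug, PartialEq)]
#[reflect(Clone)]
struct Velocity {
    x: f32,
    y: f32,
}

fn main() {
    let v = Velocity { x: 1.0, y: 2.0 };

    let cloned = v.reflect_clone().expect("delegates to Clone::clone");
    let cloned: &Velocity = cloned.downcast_ref().expect("same concrete type");

    assert_eq!(*cloned, v);
}
```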
+ pub fn field_id(&self, bevy_reflect_path: &Path) -> proc_macro2::TokenStream { + match &self.data.ident { + Some(ident) => { + let name = ident.to_string(); + quote!(#bevy_reflect_path::FieldId::Named(#bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed(#name))) + } + None => { + let index = self.declaration_index; + quote!(#bevy_reflect_path::FieldId::Unnamed(#index)) + } + } + } } impl<'a> ReflectStruct<'a> { @@ -655,6 +684,135 @@ impl<'a> ReflectStruct<'a> { #bevy_reflect_path::TypeInfo::#info_variant(#info) } } + /// Returns the `Reflect::reflect_clone` impl, if any, as a `TokenStream`. + pub fn get_clone_impl(&self) -> Option { + let bevy_reflect_path = self.meta().bevy_reflect_path(); + + if let container_clone @ Some(_) = self.meta().attrs().get_clone_impl(bevy_reflect_path) { + return container_clone; + } + + let mut tokens = proc_macro2::TokenStream::new(); + + for field in self.fields().iter() { + let field_ty = field.reflected_type(); + let member = field.to_member(); + let accessor = self.access_for_field(field, false); + + match &field.attrs.clone { + CloneBehavior::Default => { + let value = if field.attrs.ignore.is_ignored() { + let field_id = field.field_id(bevy_reflect_path); + + quote! { + return #FQResult::Err(#bevy_reflect_path::ReflectCloneError::FieldNotCloneable { + field: #field_id, + variant: #FQOption::None, + container_type_path: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed( + ::type_path() + ) + }) + } + } else { + quote! { + #bevy_reflect_path::PartialReflect::reflect_clone(#accessor)? + .take() + .map_err(|value| #bevy_reflect_path::ReflectCloneError::FailedDowncast { + expected: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed( + <#field_ty as #bevy_reflect_path::TypePath>::type_path() + ), + received: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Owned( + #bevy_reflect_path::__macro_exports::alloc_utils::ToString::to_string( + #bevy_reflect_path::DynamicTypePath::reflect_type_path(&*value) + ) + ), + })? + } + }; + + tokens.extend(quote! { + #member: #value, + }); + } + CloneBehavior::Trait => { + tokens.extend(quote! { + #member: #FQClone::clone(#accessor), + }); + } + CloneBehavior::Func(clone_fn) => { + tokens.extend(quote! { + #member: #clone_fn(#accessor), + }); + } + } + } + + let ctor = match self.meta.remote_ty() { + Some(ty) => { + let ty = ty.as_expr_path().ok()?.to_token_stream(); + quote! { + Self(#ty { + #tokens + }) + } + } + None => { + quote! { + Self { + #tokens + } + } + } + }; + + Some(quote! { + #[inline] + #[allow(unreachable_code, reason = "Ignored fields without a `clone` attribute will early-return with an error")] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#ctor)) + } + }) + } + + /// Generates an accessor for the given field. + /// + /// The mutability of the access can be controlled by the `is_mut` parameter. + /// + /// Generally, this just returns something like `&self.field`. + /// However, if the struct is a remote wrapper, this then becomes `&self.0.field` in order to access the field on the inner type. + /// + /// If the field itself is a remote type, the above accessor is further wrapped in a call to `ReflectRemote::as_wrapper[_mut]`. 
+ pub fn access_for_field( + &self, + field: &StructField<'a>, + is_mutable: bool, + ) -> proc_macro2::TokenStream { + let bevy_reflect_path = self.meta().bevy_reflect_path(); + let member = field.to_member(); + + let prefix_tokens = if is_mutable { quote!(&mut) } else { quote!(&) }; + + let accessor = if self.meta.is_remote_wrapper() { + quote!(self.0.#member) + } else { + quote!(self.#member) + }; + + match &field.attrs.remote { + Some(wrapper_ty) => { + let method = if is_mutable { + format_ident!("as_wrapper_mut") + } else { + format_ident!("as_wrapper") + }; + + quote! { + <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::#method(#prefix_tokens #accessor) + } + } + None => quote!(#prefix_tokens #accessor), + } + } } impl<'a> ReflectEnum<'a> { @@ -757,6 +915,51 @@ impl<'a> ReflectEnum<'a> { #bevy_reflect_path::TypeInfo::Enum(#info) } } + + /// Returns the `Reflect::reflect_clone` impl, if any, as a `TokenStream`. + pub fn get_clone_impl(&self) -> Option { + let bevy_reflect_path = self.meta().bevy_reflect_path(); + + if let container_clone @ Some(_) = self.meta().attrs().get_clone_impl(bevy_reflect_path) { + return container_clone; + } + + let this = Ident::new("this", Span::call_site()); + let EnumVariantOutputData { + variant_patterns, + variant_constructors, + .. + } = ReflectCloneVariantBuilder::new(self).build(&this); + + let inner = quote! { + match #this { + #(#variant_patterns => #variant_constructors),* + } + }; + + let body = if variant_patterns.is_empty() { + // enum variant is empty, so &self will never exist + quote!(unreachable!()) + } else if self.meta.is_remote_wrapper() { + quote! { + let #this = ::as_remote(self); + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(::into_wrapper(#inner))) + } + } else { + quote! { + let #this = self; + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#inner)) + } + }; + + Some(quote! { + #[inline] + #[allow(unreachable_code, reason = "Ignored fields without a `clone` attribute will early-return with an error")] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #body + } + }) + } } impl<'a> EnumVariant<'a> { @@ -897,7 +1100,7 @@ pub(crate) enum ReflectTypePath<'a> { reason = "Not currently used but may be useful in the future due to its generality." )] Anonymous { - qualified_type: Type, + qualified_type: Box, long_type_path: StringExpr, short_type_path: StringExpr, }, diff --git a/crates/bevy_reflect/derive/src/enum_utility.rs b/crates/bevy_reflect/derive/src/enum_utility.rs index f4b1e5ede8..5571b861a6 100644 --- a/crates/bevy_reflect/derive/src/enum_utility.rs +++ b/crates/bevy_reflect/derive/src/enum_utility.rs @@ -1,16 +1,21 @@ +use crate::field_attributes::CloneBehavior; use crate::{ derive_data::ReflectEnum, derive_data::StructField, field_attributes::DefaultBehavior, ident::ident_or_index, }; -use bevy_macro_utils::fq_std::{FQDefault, FQOption}; +use bevy_macro_utils::fq_std::{FQClone, FQDefault, FQOption, FQResult}; use proc_macro2::{Ident, TokenStream}; -use quote::{format_ident, quote}; +use quote::{format_ident, quote, ToTokens}; pub(crate) struct EnumVariantOutputData { /// The names of each variant as a string. /// /// For example, `Some` and `None` for the `Option` enum. pub variant_names: Vec, + /// The pattern matching portion of each variant. + /// + /// For example, `Option::Some { 0: _0 }` and `Option::None {}` for the `Option` enum. 
+ pub variant_patterns: Vec, /// The constructor portion of each variant. /// /// For example, `Option::Some { 0: value }` and `Option::None {}` for the `Option` enum. @@ -139,6 +144,7 @@ pub(crate) trait VariantBuilder: Sized { let variants = self.reflect_enum().variants(); let mut variant_names = Vec::with_capacity(variants.len()); + let mut variant_patterns = Vec::with_capacity(variants.len()); let mut variant_constructors = Vec::with_capacity(variants.len()); for variant in variants { @@ -148,7 +154,10 @@ pub(crate) trait VariantBuilder: Sized { let fields = variant.fields(); - let field_constructors = fields.iter().map(|field| { + let mut field_patterns = Vec::with_capacity(fields.len()); + let mut field_constructors = Vec::with_capacity(fields.len()); + + for field in fields { let member = ident_or_index(field.data.ident.as_ref(), field.declaration_index); let alias = format_ident!("_{}", member); @@ -164,12 +173,18 @@ pub(crate) trait VariantBuilder: Sized { self.on_active_field(this, variant_field) }; - let constructor = quote! { - #member: #value - }; + field_patterns.push(quote! { + #member: #alias + }); - constructor - }); + field_constructors.push(quote! { + #member: #value + }); + } + + let pattern = quote! { + #variant_path { #( #field_patterns ),* } + }; let constructor = quote! { #variant_path { @@ -178,11 +193,13 @@ pub(crate) trait VariantBuilder: Sized { }; variant_names.push(variant_name); + variant_patterns.push(pattern); variant_constructors.push(constructor); } EnumVariantOutputData { variant_names, + variant_patterns, variant_constructors, } } @@ -275,3 +292,103 @@ impl<'a> VariantBuilder for TryApplyVariantBuilder<'a> { } } } + +/// Generates the enum variant output data needed to build the `Reflect::reflect_clone` implementation. +pub(crate) struct ReflectCloneVariantBuilder<'a> { + reflect_enum: &'a ReflectEnum<'a>, +} + +impl<'a> ReflectCloneVariantBuilder<'a> { + pub fn new(reflect_enum: &'a ReflectEnum) -> Self { + Self { reflect_enum } + } +} + +impl<'a> VariantBuilder for ReflectCloneVariantBuilder<'a> { + fn reflect_enum(&self) -> &ReflectEnum { + self.reflect_enum + } + + fn access_field(&self, _ident: &Ident, field: VariantField) -> TokenStream { + let alias = field.alias; + quote!(#FQOption::Some(#alias)) + } + + fn unwrap_field(&self, field: VariantField) -> TokenStream { + let alias = field.alias; + quote!(#alias.unwrap()) + } + + fn construct_field(&self, field: VariantField) -> TokenStream { + let bevy_reflect_path = self.reflect_enum.meta().bevy_reflect_path(); + + let field_ty = field.field.reflected_type(); + + let alias = field.alias; + let alias = match &field.field.attrs.remote { + Some(wrapper_ty) => { + quote! { + <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::as_wrapper(#alias) + } + } + None => alias.to_token_stream(), + }; + + match &field.field.attrs.clone { + CloneBehavior::Default => { + quote! { + #bevy_reflect_path::PartialReflect::reflect_clone(#alias)? + .take() + .map_err(|value| #bevy_reflect_path::ReflectCloneError::FailedDowncast { + expected: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed( + <#field_ty as #bevy_reflect_path::TypePath>::type_path() + ), + received: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Owned( + #bevy_reflect_path::__macro_exports::alloc_utils::ToString::to_string( + #bevy_reflect_path::DynamicTypePath::reflect_type_path(&*value) + ) + ), + })? + } + } + CloneBehavior::Trait => { + quote! { + #FQClone::clone(#alias) + } + } + CloneBehavior::Func(clone_fn) => { + quote! 
{ + #clone_fn(#alias) + } + } + } + } + + fn on_active_field(&self, _this: &Ident, field: VariantField) -> TokenStream { + self.construct_field(field) + } + + fn on_ignored_field(&self, field: VariantField) -> TokenStream { + let bevy_reflect_path = self.reflect_enum.meta().bevy_reflect_path(); + let variant_name = field.variant_name; + let alias = field.alias; + + match &field.field.attrs.clone { + CloneBehavior::Default => { + let field_id = field.field.field_id(bevy_reflect_path); + + quote! { + return #FQResult::Err( + #bevy_reflect_path::ReflectCloneError::FieldNotCloneable { + field: #field_id, + variant: #FQOption::Some(#bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed(#variant_name)), + container_type_path: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed(::type_path()) + } + ) + } + } + CloneBehavior::Trait => quote! { #FQClone::clone(#alias) }, + CloneBehavior::Func(clone_fn) => quote! { #clone_fn() }, + } + } +} diff --git a/crates/bevy_reflect/derive/src/field_attributes.rs b/crates/bevy_reflect/derive/src/field_attributes.rs index 6cddb50e61..06d64791c4 100644 --- a/crates/bevy_reflect/derive/src/field_attributes.rs +++ b/crates/bevy_reflect/derive/src/field_attributes.rs @@ -14,6 +14,7 @@ use syn::{parse::ParseStream, Attribute, LitStr, Meta, Token, Type}; mod kw { syn::custom_keyword!(ignore); syn::custom_keyword!(skip_serializing); + syn::custom_keyword!(clone); syn::custom_keyword!(default); syn::custom_keyword!(remote); } @@ -22,6 +23,7 @@ pub(crate) const IGNORE_SERIALIZATION_ATTR: &str = "skip_serializing"; pub(crate) const IGNORE_ALL_ATTR: &str = "ignore"; pub(crate) const DEFAULT_ATTR: &str = "default"; +pub(crate) const CLONE_ATTR: &str = "clone"; /// Stores data about if the field should be visible via the Reflect and serialization interfaces /// @@ -54,6 +56,14 @@ impl ReflectIgnoreBehavior { } } +#[derive(Default, Clone)] +pub(crate) enum CloneBehavior { + #[default] + Default, + Trait, + Func(syn::ExprPath), +} + /// Controls how the default value is determined for a field. #[derive(Default, Clone)] pub(crate) enum DefaultBehavior { @@ -74,6 +84,8 @@ pub(crate) enum DefaultBehavior { pub(crate) struct FieldAttributes { /// Determines how this field should be ignored if at all. pub ignore: ReflectIgnoreBehavior, + /// Sets the clone behavior of this field. + pub clone: CloneBehavior, /// Sets the default behavior of this field. pub default: DefaultBehavior, /// Custom attributes created via `#[reflect(@...)]`. @@ -121,6 +133,8 @@ impl FieldAttributes { self.parse_ignore(input) } else if lookahead.peek(kw::skip_serializing) { self.parse_skip_serializing(input) + } else if lookahead.peek(kw::clone) { + self.parse_clone(input) } else if lookahead.peek(kw::default) { self.parse_default(input) } else if lookahead.peek(kw::remote) { @@ -164,6 +178,30 @@ impl FieldAttributes { Ok(()) } + /// Parse `clone` attribute. + /// + /// Examples: + /// - `#[reflect(clone)]` + /// - `#[reflect(clone = "path::to::func")]` + fn parse_clone(&mut self, input: ParseStream) -> syn::Result<()> { + if !matches!(self.clone, CloneBehavior::Default) { + return Err(input.error(format!("only one of {:?} is allowed", [CLONE_ATTR]))); + } + + input.parse::()?; + + if input.peek(Token![=]) { + input.parse::()?; + + let lit = input.parse::()?; + self.clone = CloneBehavior::Func(lit.parse()?); + } else { + self.clone = CloneBehavior::Trait; + } + + Ok(()) + } + /// Parse `default` attribute. 
/// /// Examples: diff --git a/crates/bevy_reflect/derive/src/impls/enums.rs b/crates/bevy_reflect/derive/src/impls/enums.rs index 235a7cff1c..3cbd8cce95 100644 --- a/crates/bevy_reflect/derive/src/impls/enums.rs +++ b/crates/bevy_reflect/derive/src/impls/enums.rs @@ -70,6 +70,7 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream || Some(quote!(#bevy_reflect_path::enum_partial_eq)), || Some(quote!(#bevy_reflect_path::enum_hash)), ); + let clone_fn = reflect_enum.get_clone_impl(); #[cfg(not(feature = "functions"))] let function_impls = None::; @@ -174,7 +175,7 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream } } - fn clone_dynamic(&self) -> #bevy_reflect_path::DynamicEnum { + fn to_dynamic_enum(&self) -> #bevy_reflect_path::DynamicEnum { #bevy_reflect_path::DynamicEnum::from_ref::(self) } } @@ -185,11 +186,6 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream #FQOption::Some(::type_info()) } - #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { - #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#bevy_reflect_path::Enum::clone_dynamic(self)) - } - #[inline] fn try_apply( &mut self, @@ -261,6 +257,8 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/impls/opaque.rs b/crates/bevy_reflect/derive/src/impls/opaque.rs index bdee656a96..2a08cadc28 100644 --- a/crates/bevy_reflect/derive/src/impls/opaque.rs +++ b/crates/bevy_reflect/derive/src/impls/opaque.rs @@ -32,6 +32,7 @@ pub(crate) fn impl_opaque(meta: &ReflectMeta) -> proc_macro2::TokenStream { let type_path_impl = impl_type_path(meta); let full_reflect_impl = impl_full_reflect(meta, &where_clause_options); let common_methods = common_partial_reflect_methods(meta, || None, || None); + let clone_fn = meta.attrs().get_clone_impl(bevy_reflect_path); let apply_impl = if let Some(remote_ty) = meta.remote_ty() { let ty = remote_ty.type_path(); @@ -77,7 +78,7 @@ pub(crate) fn impl_opaque(meta: &ReflectMeta) -> proc_macro2::TokenStream { } #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { + fn to_dynamic(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#FQClone::clone(self)) } @@ -117,6 +118,8 @@ pub(crate) fn impl_opaque(meta: &ReflectMeta) -> proc_macro2::TokenStream { } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/impls/structs.rs b/crates/bevy_reflect/derive/src/impls/structs.rs index c1db19ca9c..7e10de3f2b 100644 --- a/crates/bevy_reflect/derive/src/impls/structs.rs +++ b/crates/bevy_reflect/derive/src/impls/structs.rs @@ -47,6 +47,7 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS || Some(quote!(#bevy_reflect_path::struct_partial_eq)), || None, ); + let clone_fn = reflect_struct.get_clone_impl(); #[cfg(not(feature = "functions"))] let function_impls = None::; @@ -119,10 +120,10 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS #bevy_reflect_path::FieldIter::new(self) } - fn clone_dynamic(&self) -> #bevy_reflect_path::DynamicStruct { + fn to_dynamic_struct(&self) -> #bevy_reflect_path::DynamicStruct { let mut dynamic: #bevy_reflect_path::DynamicStruct = #FQDefault::default(); 
dynamic.set_represented_type(#bevy_reflect_path::PartialReflect::get_represented_type_info(self)); - #(dynamic.insert_boxed(#field_names, #bevy_reflect_path::PartialReflect::clone_value(#fields_ref));)* + #(dynamic.insert_boxed(#field_names, #bevy_reflect_path::PartialReflect::to_dynamic(#fields_ref));)* dynamic } } @@ -133,11 +134,6 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS #FQOption::Some(::type_info()) } - #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { - #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#bevy_reflect_path::Struct::clone_dynamic(self)) - } - #[inline] fn try_apply( &mut self, @@ -179,6 +175,8 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/impls/tuple_structs.rs b/crates/bevy_reflect/derive/src/impls/tuple_structs.rs index a0037c64ca..90c3555230 100644 --- a/crates/bevy_reflect/derive/src/impls/tuple_structs.rs +++ b/crates/bevy_reflect/derive/src/impls/tuple_structs.rs @@ -37,6 +37,7 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: || Some(quote!(#bevy_reflect_path::tuple_struct_partial_eq)), || None, ); + let clone_fn = reflect_struct.get_clone_impl(); #[cfg(not(feature = "functions"))] let function_impls = None::; @@ -86,10 +87,10 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: #bevy_reflect_path::TupleStructFieldIter::new(self) } - fn clone_dynamic(&self) -> #bevy_reflect_path::DynamicTupleStruct { + fn to_dynamic_tuple_struct(&self) -> #bevy_reflect_path::DynamicTupleStruct { let mut dynamic: #bevy_reflect_path::DynamicTupleStruct = #FQDefault::default(); dynamic.set_represented_type(#bevy_reflect_path::PartialReflect::get_represented_type_info(self)); - #(dynamic.insert_boxed(#bevy_reflect_path::PartialReflect::clone_value(#fields_ref));)* + #(dynamic.insert_boxed(#bevy_reflect_path::PartialReflect::to_dynamic(#fields_ref));)* dynamic } } @@ -99,10 +100,6 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: fn get_represented_type_info(&self) -> #FQOption<&'static #bevy_reflect_path::TypeInfo> { #FQOption::Some(::type_info()) } - #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { - #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#bevy_reflect_path::TupleStruct::clone_dynamic(self)) - } #[inline] fn try_apply( @@ -144,6 +141,8 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/lib.rs b/crates/bevy_reflect/derive/src/lib.rs index 276371427b..2d9dfca681 100644 --- a/crates/bevy_reflect/derive/src/lib.rs +++ b/crates/bevy_reflect/derive/src/lib.rs @@ -156,20 +156,25 @@ fn match_reflect_impls(ast: DeriveInput, source: ReflectImplSource) -> TokenStre /// /// There are a few "special" identifiers that work a bit differently: /// +/// * `#[reflect(Clone)]` will force the implementation of `Reflect::reflect_clone` to rely on +/// the type's [`Clone`] implementation. +/// A custom implementation may be provided using `#[reflect(Clone(my_clone_func))]` where +/// `my_clone_func` is the path to a function matching the signature: +/// `(&Self) -> Self`. /// * `#[reflect(Debug)]` will force the implementation of `Reflect::reflect_debug` to rely on /// the type's [`Debug`] implementation. 
/// A custom implementation may be provided using `#[reflect(Debug(my_debug_func))]` where /// `my_debug_func` is the path to a function matching the signature: -/// `(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result`. +/// `(&Self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result`. /// * `#[reflect(PartialEq)]` will force the implementation of `Reflect::reflect_partial_eq` to rely on /// the type's [`PartialEq`] implementation. /// A custom implementation may be provided using `#[reflect(PartialEq(my_partial_eq_func))]` where /// `my_partial_eq_func` is the path to a function matching the signature: -/// `(&self, value: &dyn #bevy_reflect_path::Reflect) -> bool`. +/// `(&Self, value: &dyn #bevy_reflect_path::Reflect) -> bool`. /// * `#[reflect(Hash)]` will force the implementation of `Reflect::reflect_hash` to rely on /// the type's [`Hash`] implementation. /// A custom implementation may be provided using `#[reflect(Hash(my_hash_func))]` where -/// `my_hash_func` is the path to a function matching the signature: `(&self) -> u64`. +/// `my_hash_func` is the path to a function matching the signature: `(&Self) -> u64`. /// * `#[reflect(Default)]` will register the `ReflectDefault` type data as normal. /// However, it will also affect how certain other operations are performed in order /// to improve performance and/or robustness. @@ -339,6 +344,18 @@ fn match_reflect_impls(ast: DeriveInput, source: ReflectImplSource) -> TokenStre /// What this does is register the `SerializationData` type within the `GetTypeRegistration` implementation, /// which will be used by the reflection serializers to determine whether or not the field is serializable. /// +/// ## `#[reflect(clone)]` +/// +/// This attribute affects the `Reflect::reflect_clone` implementation. +/// +/// Without this attribute, the implementation will rely on the field's own `Reflect::reflect_clone` implementation. +/// When this attribute is present, the implementation will instead use the field's `Clone` implementation directly. +/// +/// The attribute may also take the path to a custom function like `#[reflect(clone = "path::to::my_clone_func")]`, +/// where `my_clone_func` matches the signature `(&Self) -> Self`. +/// +/// This attribute does nothing if the containing struct/enum has the `#[reflect(Clone)]` attribute. +/// /// ## `#[reflect(@...)]` /// /// This attribute can be used to register custom attributes to the field's `TypeInfo`. diff --git a/crates/bevy_reflect/derive/src/string_expr.rs b/crates/bevy_reflect/derive/src/string_expr.rs index cc48a90b91..dc878f39a9 100644 --- a/crates/bevy_reflect/derive/src/string_expr.rs +++ b/crates/bevy_reflect/derive/src/string_expr.rs @@ -80,7 +80,7 @@ impl StringExpr { let owned = self.into_owned(); let borrowed = other.into_borrowed(); Self::Owned(quote! { - #owned + #borrowed + ::core::ops::Add::<&str>::add(#owned, #borrowed) }) } } diff --git a/crates/bevy_reflect/derive/src/struct_utility.rs b/crates/bevy_reflect/derive/src/struct_utility.rs index 09604419b6..9bfd72de60 100644 --- a/crates/bevy_reflect/derive/src/struct_utility.rs +++ b/crates/bevy_reflect/derive/src/struct_utility.rs @@ -1,5 +1,4 @@ -use crate::{derive_data::StructField, ReflectStruct}; -use quote::quote; +use crate::ReflectStruct; /// A helper struct for creating remote-aware field accessors. 
/// @@ -20,27 +19,15 @@ pub(crate) struct FieldAccessors { impl FieldAccessors { pub fn new(reflect_struct: &ReflectStruct) -> Self { - let bevy_reflect_path = reflect_struct.meta().bevy_reflect_path(); - let fields_ref = Self::get_fields(reflect_struct, |field, accessor| { - match &field.attrs.remote { - Some(wrapper_ty) => { - quote! { - <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::as_wrapper(&#accessor) - } - } - None => quote!(& #accessor), - } - }); - let fields_mut = Self::get_fields(reflect_struct, |field, accessor| { - match &field.attrs.remote { - Some(wrapper_ty) => { - quote! { - <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::as_wrapper_mut(&mut #accessor) - } - } - None => quote!(&mut #accessor), - } - }); + let (fields_ref, fields_mut): (Vec<_>, Vec<_>) = reflect_struct + .active_fields() + .map(|field| { + ( + reflect_struct.access_for_field(field, false), + reflect_struct.access_for_field(field, true), + ) + }) + .unzip(); let field_count = fields_ref.len(); let field_indices = (0..field_count).collect(); @@ -52,30 +39,4 @@ impl FieldAccessors { field_count, } } - - fn get_fields( - reflect_struct: &ReflectStruct, - mut wrapper_fn: F, - ) -> Vec - where - F: FnMut(&StructField, proc_macro2::TokenStream) -> proc_macro2::TokenStream, - { - let is_remote = reflect_struct.meta().is_remote_wrapper(); - reflect_struct - .active_fields() - .map(|field| { - let member = crate::ident::ident_or_index( - field.data.ident.as_ref(), - field.declaration_index, - ); - let accessor = if is_remote { - quote!(self.0.#member) - } else { - quote!(self.#member) - }; - - wrapper_fn(field, accessor) - }) - .collect::>() - } } diff --git a/crates/bevy_reflect/examples/reflect_docs.rs b/crates/bevy_reflect/examples/reflect_docs.rs index 10852185c0..d52589cc1d 100644 --- a/crates/bevy_reflect/examples/reflect_docs.rs +++ b/crates/bevy_reflect/examples/reflect_docs.rs @@ -6,6 +6,8 @@ //! //! These scenarios can readily be achieved by using `bevy_reflect` with the `documentation` feature. +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] + use bevy_reflect::{Reflect, TypeInfo, Typed}; fn main() { diff --git a/crates/bevy_reflect/src/array.rs b/crates/bevy_reflect/src/array.rs index 300e69c2a7..9ad906cfce 100644 --- a/crates/bevy_reflect/src/array.rs +++ b/crates/bevy_reflect/src/array.rs @@ -69,10 +69,16 @@ pub trait Array: PartialReflect { fn drain(self: Box) -> Vec>; /// Clones the list, producing a [`DynamicArray`]. + #[deprecated(since = "0.16.0", note = "use `to_dynamic_array` instead")] fn clone_dynamic(&self) -> DynamicArray { + self.to_dynamic_array() + } + + /// Creates a new [`DynamicArray`] from this array. 
+ fn to_dynamic_array(&self) -> DynamicArray { DynamicArray { represented_type: self.get_represented_type_info(), - values: self.iter().map(PartialReflect::clone_value).collect(), + values: self.iter().map(PartialReflect::to_dynamic).collect(), } } @@ -256,11 +262,6 @@ impl PartialReflect for DynamicArray { ReflectOwned::Array(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { array_hash(self) @@ -307,18 +308,6 @@ impl Array for DynamicArray { fn drain(self: Box) -> Vec> { self.values.into_vec() } - - #[inline] - fn clone_dynamic(&self) -> DynamicArray { - DynamicArray { - represented_type: self.represented_type, - values: self - .values - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl FromIterator> for DynamicArray { diff --git a/crates/bevy_reflect/src/enums/dynamic_enum.rs b/crates/bevy_reflect/src/enums/dynamic_enum.rs index d9e228c07e..3380921fbe 100644 --- a/crates/bevy_reflect/src/enums/dynamic_enum.rs +++ b/crates/bevy_reflect/src/enums/dynamic_enum.rs @@ -23,8 +23,8 @@ impl Clone for DynamicVariant { fn clone(&self) -> Self { match self { DynamicVariant::Unit => DynamicVariant::Unit, - DynamicVariant::Tuple(data) => DynamicVariant::Tuple(data.clone_dynamic()), - DynamicVariant::Struct(data) => DynamicVariant::Struct(data.clone_dynamic()), + DynamicVariant::Tuple(data) => DynamicVariant::Tuple(data.to_dynamic_tuple()), + DynamicVariant::Struct(data) => DynamicVariant::Struct(data.to_dynamic_struct()), } } } @@ -140,6 +140,22 @@ impl DynamicEnum { self.variant = variant.into(); } + /// Get a reference to the [`DynamicVariant`] contained in `self`. + pub fn variant(&self) -> &DynamicVariant { + &self.variant + } + + /// Get a mutable reference to the [`DynamicVariant`] contained in `self`. + /// + /// Using the mut reference to switch to a different variant will ___not___ update the + /// internal tracking of the variant name and index. + /// + /// If you want to switch variants, prefer one of the setters: + /// [`DynamicEnum::set_variant`] or [`DynamicEnum::set_variant_with_index`]. + pub fn variant_mut(&mut self) -> &mut DynamicVariant { + &mut self.variant + } + /// Create a [`DynamicEnum`] from an existing one. /// /// This is functionally the same as [`DynamicEnum::from_ref`] except it takes an owned value. @@ -150,7 +166,7 @@ impl DynamicEnum { /// Create a [`DynamicEnum`] from an existing one. /// /// This is functionally the same as [`DynamicEnum::from`] except it takes a reference. 
- pub fn from_ref(value: &TEnum) -> Self { + pub fn from_ref(value: &TEnum) -> Self { let type_info = value.get_represented_type_info(); let mut dyn_enum = match value.variant_type() { VariantType::Unit => DynamicEnum::new_with_index( @@ -161,7 +177,7 @@ impl DynamicEnum { VariantType::Tuple => { let mut data = DynamicTuple::default(); for field in value.iter_fields() { - data.insert_boxed(field.value().clone_value()); + data.insert_boxed(field.value().to_dynamic()); } DynamicEnum::new_with_index( value.variant_index(), @@ -173,7 +189,7 @@ impl DynamicEnum { let mut data = DynamicStruct::default(); for field in value.iter_fields() { let name = field.name().unwrap(); - data.insert_boxed(name, field.value().clone_value()); + data.insert_boxed(name, field.value().to_dynamic()); } DynamicEnum::new_with_index( value.variant_index(), @@ -339,14 +355,14 @@ impl PartialReflect for DynamicEnum { VariantType::Tuple => { let mut dyn_tuple = DynamicTuple::default(); for field in value.iter_fields() { - dyn_tuple.insert_boxed(field.value().clone_value()); + dyn_tuple.insert_boxed(field.value().to_dynamic()); } DynamicVariant::Tuple(dyn_tuple) } VariantType::Struct => { let mut dyn_struct = DynamicStruct::default(); for field in value.iter_fields() { - dyn_struct.insert_boxed(field.name().unwrap(), field.value().clone_value()); + dyn_struct.insert_boxed(field.name().unwrap(), field.value().to_dynamic()); } DynamicVariant::Struct(dyn_struct) } @@ -377,11 +393,6 @@ impl PartialReflect for DynamicEnum { ReflectOwned::Enum(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { enum_hash(self) diff --git a/crates/bevy_reflect/src/enums/enum_trait.rs b/crates/bevy_reflect/src/enums/enum_trait.rs index d3f6550640..bcbcb300d5 100644 --- a/crates/bevy_reflect/src/enums/enum_trait.rs +++ b/crates/bevy_reflect/src/enums/enum_trait.rs @@ -5,8 +5,8 @@ use crate::{ DynamicEnum, Generics, PartialReflect, Type, TypePath, VariantInfo, VariantType, }; use alloc::{boxed::Box, format, string::String}; -use bevy_platform_support::collections::HashMap; -use bevy_platform_support::sync::Arc; +use bevy_platform::collections::HashMap; +use bevy_platform::sync::Arc; use core::slice::Iter; /// A trait used to power [enum-like] operations via [reflection]. @@ -125,7 +125,14 @@ pub trait Enum: PartialReflect { /// The type of the current variant. fn variant_type(&self) -> VariantType; // Clones the enum into a [`DynamicEnum`]. - fn clone_dynamic(&self) -> DynamicEnum; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_enum` instead")] + fn clone_dynamic(&self) -> DynamicEnum { + self.to_dynamic_enum() + } + /// Creates a new [`DynamicEnum`] from this enum. + fn to_dynamic_enum(&self) -> DynamicEnum { + DynamicEnum::from_ref(self) + } /// Returns true if the current variant's type matches the given one. 
fn is_variant(&self, variant_type: VariantType) -> bool { self.variant_type() == variant_type diff --git a/crates/bevy_reflect/src/enums/variants.rs b/crates/bevy_reflect/src/enums/variants.rs index 25e40334e7..55ccb8efb1 100644 --- a/crates/bevy_reflect/src/enums/variants.rs +++ b/crates/bevy_reflect/src/enums/variants.rs @@ -3,8 +3,8 @@ use crate::{ NamedField, UnnamedField, }; use alloc::boxed::Box; -use bevy_platform_support::collections::HashMap; -use bevy_platform_support::sync::Arc; +use bevy_platform::collections::HashMap; +use bevy_platform::sync::Arc; use core::slice::Iter; use thiserror::Error; diff --git a/crates/bevy_reflect/src/error.rs b/crates/bevy_reflect/src/error.rs new file mode 100644 index 0000000000..e783a33775 --- /dev/null +++ b/crates/bevy_reflect/src/error.rs @@ -0,0 +1,61 @@ +use crate::FieldId; +use alloc::{borrow::Cow, format}; +use thiserror::Error; + +/// An error that occurs when cloning a type via [`PartialReflect::reflect_clone`]. +/// +/// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone +#[derive(Clone, Debug, Error, PartialEq, Eq)] +pub enum ReflectCloneError { + /// The type does not have a custom implementation for [`PartialReflect::reflect_clone`]. + /// + /// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone + #[error("`PartialReflect::reflect_clone` not implemented for `{type_path}`")] + NotImplemented { type_path: Cow<'static, str> }, + /// The type cannot be cloned via [`PartialReflect::reflect_clone`]. + /// + /// This type should be returned when a type is intentionally opting out of reflection cloning. + /// + /// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone + #[error("`{type_path}` cannot be made cloneable for `PartialReflect::reflect_clone`")] + NotCloneable { type_path: Cow<'static, str> }, + /// The field cannot be cloned via [`PartialReflect::reflect_clone`]. + /// + /// When [deriving `Reflect`], this usually means that a field marked with `#[reflect(ignore)]` + /// is missing a `#[reflect(clone)]` attribute. + /// + /// This may be intentional if the field is not meant/able to be cloned. + /// + /// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone + /// [deriving `Reflect`]: derive@crate::Reflect + #[error( + "field `{}` cannot be made cloneable for `PartialReflect::reflect_clone` (are you missing a `#[reflect(clone)]` attribute?)", + full_path(.field, .variant.as_deref(), .container_type_path) + )] + FieldNotCloneable { + field: FieldId, + variant: Option>, + container_type_path: Cow<'static, str>, + }, + /// Could not downcast to the expected type. + /// + /// Realistically this should only occur when a type has incorrectly implemented [`Reflect`]. 
+ /// + /// [`Reflect`]: crate::Reflect + #[error("expected downcast to `{expected}`, but received `{received}`")] + FailedDowncast { + expected: Cow<'static, str>, + received: Cow<'static, str>, + }, +} + +fn full_path( + field: &FieldId, + variant: Option<&str>, + container_type_path: &str, +) -> alloc::string::String { + match variant { + Some(variant) => format!("{}::{}::{}", container_type_path, variant, field), + None => format!("{}::{}", container_type_path, field), + } +} diff --git a/crates/bevy_reflect/src/fields.rs b/crates/bevy_reflect/src/fields.rs index 3a521c21cc..21d4ccd98a 100644 --- a/crates/bevy_reflect/src/fields.rs +++ b/crates/bevy_reflect/src/fields.rs @@ -3,7 +3,9 @@ use crate::{ type_info::impl_type_methods, MaybeTyped, PartialReflect, Type, TypeInfo, TypePath, }; -use bevy_platform_support::sync::Arc; +use alloc::borrow::Cow; +use bevy_platform::sync::Arc; +use core::fmt::{Display, Formatter}; /// The named field of a reflected struct. #[derive(Clone, Debug)] @@ -129,3 +131,19 @@ impl UnnamedField { impl_custom_attribute_methods!(self.custom_attributes, "field"); } + +/// A representation of a field's accessor. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum FieldId { + Named(Cow<'static, str>), + Unnamed(usize), +} + +impl Display for FieldId { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + Self::Named(name) => Display::fmt(name, f), + Self::Unnamed(index) => Display::fmt(index, f), + } + } +} diff --git a/crates/bevy_reflect/src/func/args/count.rs b/crates/bevy_reflect/src/func/args/count.rs index d5f410f88d..159950ca61 100644 --- a/crates/bevy_reflect/src/func/args/count.rs +++ b/crates/bevy_reflect/src/func/args/count.rs @@ -264,7 +264,7 @@ mod tests { } #[test] - fn should_allow_removeting_nonexistent_count() { + fn should_allow_removing_nonexistent_count() { let mut count = ArgCount::default(); assert_eq!(count.len(), 0); diff --git a/crates/bevy_reflect/src/func/dynamic_function.rs b/crates/bevy_reflect/src/func/dynamic_function.rs index c090442ce4..7a5da57525 100644 --- a/crates/bevy_reflect/src/func/dynamic_function.rs +++ b/crates/bevy_reflect/src/func/dynamic_function.rs @@ -11,7 +11,7 @@ use crate::{ ReflectRef, TypeInfo, TypePath, }; use alloc::{borrow::Cow, boxed::Box}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use bevy_reflect_derive::impl_type_path; use core::fmt::{Debug, Formatter}; @@ -94,7 +94,7 @@ impl<'env> DynamicFunction<'env> { ) -> Self { let arc = Arc::new(func); - #[cfg(feature = "portable-atomic")] + #[cfg(not(target_has_atomic = "ptr"))] #[expect( unsafe_code, reason = "unsized coercion is an unstable feature for non-std types" @@ -358,7 +358,7 @@ impl Function for DynamicFunction<'static> { self.call(args) } - fn clone_dynamic(&self) -> DynamicFunction<'static> { + fn to_dynamic_function(&self) -> DynamicFunction<'static> { self.clone() } } @@ -395,7 +395,7 @@ impl PartialReflect for DynamicFunction<'static> { fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { match value.reflect_ref() { ReflectRef::Function(func) => { - *self = func.clone_dynamic(); + *self = func.to_dynamic_function(); Ok(()) } _ => Err(ApplyError::MismatchedTypes { @@ -421,10 +421,6 @@ impl PartialReflect for DynamicFunction<'static> { ReflectOwned::Function(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone()) - } - fn reflect_hash(&self) -> Option { None } @@ -484,7 +480,7 @@ mod tests { use crate::func::{FunctionError, IntoReturn, SignatureInfo}; use 
crate::Type; use alloc::{format, string::String, vec, vec::Vec}; - use bevy_platform_support::collections::HashSet; + use bevy_platform::collections::HashSet; use core::ops::Add; #[test] @@ -562,14 +558,14 @@ mod tests { assert_eq!(greet.name().unwrap(), "greet"); assert_eq!(clone.name().unwrap(), "greet"); - let clone_value = clone + let cloned_value = clone .call(ArgList::default().with_ref(&String::from("world"))) .unwrap() .unwrap_owned() .try_take::() .unwrap(); - assert_eq!(clone_value, "Hello, world!"); + assert_eq!(cloned_value, "Hello, world!"); } #[test] diff --git a/crates/bevy_reflect/src/func/dynamic_function_internal.rs b/crates/bevy_reflect/src/func/dynamic_function_internal.rs index eb855dc5ce..7e36ec119d 100644 --- a/crates/bevy_reflect/src/func/dynamic_function_internal.rs +++ b/crates/bevy_reflect/src/func/dynamic_function_internal.rs @@ -2,7 +2,7 @@ use crate::func::args::ArgCount; use crate::func::signature::{ArgListSignature, ArgumentSignature}; use crate::func::{ArgList, FunctionError, FunctionInfo, FunctionOverloadError}; use alloc::{borrow::Cow, vec, vec::Vec}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::fmt::{Debug, Formatter}; /// An internal structure for storing a function and its corresponding [function information]. @@ -246,7 +246,7 @@ mod tests { } #[test] - fn should_merge_overloaed_into_single() { + fn should_merge_overload_into_single() { let mut func_a = DynamicFunctionInternal { functions: vec!['a', 'b'], info: FunctionInfo::new(SignatureInfo::anonymous().with_arg::("arg0")) diff --git a/crates/bevy_reflect/src/func/dynamic_function_mut.rs b/crates/bevy_reflect/src/func/dynamic_function_mut.rs index b706ac620a..6d8be5ac47 100644 --- a/crates/bevy_reflect/src/func/dynamic_function_mut.rs +++ b/crates/bevy_reflect/src/func/dynamic_function_mut.rs @@ -1,5 +1,5 @@ use alloc::{borrow::Cow, boxed::Box}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use core::fmt::{Debug, Formatter}; use crate::func::{ diff --git a/crates/bevy_reflect/src/func/error.rs b/crates/bevy_reflect/src/func/error.rs index d4407d9698..d9d105db1b 100644 --- a/crates/bevy_reflect/src/func/error.rs +++ b/crates/bevy_reflect/src/func/error.rs @@ -4,7 +4,7 @@ use crate::func::{ Return, }; use alloc::borrow::Cow; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use thiserror::Error; /// An error that occurs when calling a [`DynamicFunction`] or [`DynamicFunctionMut`]. diff --git a/crates/bevy_reflect/src/func/function.rs b/crates/bevy_reflect/src/func/function.rs index 2ae8c1877b..eb770e9e50 100644 --- a/crates/bevy_reflect/src/func/function.rs +++ b/crates/bevy_reflect/src/func/function.rs @@ -64,7 +64,13 @@ pub trait Function: PartialReflect + Debug { fn reflect_call<'a>(&self, args: ArgList<'a>) -> FunctionResult<'a>; /// Clone this function into a [`DynamicFunction`]. - fn clone_dynamic(&self) -> DynamicFunction<'static>; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_function` instead")] + fn clone_dynamic(&self) -> DynamicFunction<'static> { + self.to_dynamic_function() + } + + /// Creates a new [`DynamicFunction`] from this function. 
+ fn to_dynamic_function(&self) -> DynamicFunction<'static>; } #[cfg(test)] diff --git a/crates/bevy_reflect/src/func/registry.rs b/crates/bevy_reflect/src/func/registry.rs index 450a3722d4..58a8344ecf 100644 --- a/crates/bevy_reflect/src/func/registry.rs +++ b/crates/bevy_reflect/src/func/registry.rs @@ -1,8 +1,9 @@ use alloc::borrow::Cow; -use bevy_platform_support::collections::HashMap; -use bevy_platform_support::sync::Arc; +use bevy_platform::{ + collections::HashMap, + sync::{Arc, PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; use core::fmt::Debug; -use std::sync::{PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard}; use crate::func::{ ArgList, DynamicFunction, FunctionRegistrationError, FunctionResult, IntoFunction, diff --git a/crates/bevy_reflect/src/func/signature.rs b/crates/bevy_reflect/src/func/signature.rs index c8862b35d0..7813d7d4f9 100644 --- a/crates/bevy_reflect/src/func/signature.rs +++ b/crates/bevy_reflect/src/func/signature.rs @@ -15,7 +15,7 @@ use crate::func::args::ArgInfo; use crate::func::{ArgList, SignatureInfo}; use crate::Type; use alloc::boxed::Box; -use bevy_platform_support::collections::Equivalent; +use bevy_platform::collections::Equivalent; use core::borrow::Borrow; use core::fmt::{Debug, Formatter}; use core::hash::{Hash, Hasher}; diff --git a/crates/bevy_reflect/src/generics.rs b/crates/bevy_reflect/src/generics.rs index 32b13100f5..8c9c4816ba 100644 --- a/crates/bevy_reflect/src/generics.rs +++ b/crates/bevy_reflect/src/generics.rs @@ -1,7 +1,7 @@ use crate::type_info::impl_type_methods; use crate::{Reflect, Type, TypePath}; use alloc::{borrow::Cow, boxed::Box}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use core::ops::Deref; use derive_more::derive::From; @@ -183,7 +183,7 @@ impl ConstParamInfo { pub fn with_default(mut self, default: T) -> Self { let arc = Arc::new(default); - #[cfg(feature = "portable-atomic")] + #[cfg(not(target_has_atomic = "ptr"))] #[expect( unsafe_code, reason = "unsized coercion is an unstable feature for non-std types" diff --git a/crates/bevy_reflect/src/impls/glam.rs b/crates/bevy_reflect/src/impls/glam.rs index 653eb3edaa..139557ddb6 100644 --- a/crates/bevy_reflect/src/impls/glam.rs +++ b/crates/bevy_reflect/src/impls/glam.rs @@ -18,7 +18,7 @@ macro_rules! 
reflect_enum { } impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct IVec2 { x: i32, @@ -26,7 +26,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct IVec3 { x: i32, @@ -35,7 +35,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct IVec4 { x: i32, @@ -46,7 +46,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I8Vec2 { x: i8, @@ -55,7 +55,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I8Vec3 { x: i8, @@ -65,7 +65,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I8Vec4 { x: i8, @@ -76,7 +76,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I16Vec2 { x: i16, @@ -85,7 +85,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I16Vec3 { x: i16, @@ -95,7 +95,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I16Vec4 { x: i16, @@ -106,7 +106,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I64Vec2 { x: i64, @@ -115,7 +115,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I64Vec3 { x: i64, @@ -125,7 +125,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I64Vec4 { x: i64, @@ -136,7 +136,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct UVec2 { x: u32, @@ -144,7 +144,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct UVec3 { x: u32, @@ -153,7 +153,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, 
Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct UVec4 { x: u32, @@ -164,7 +164,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U8Vec2 { x: u8, @@ -172,7 +172,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U8Vec3 { x: u8, @@ -181,7 +181,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U8Vec4 { x: u8, @@ -192,7 +192,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U16Vec2 { x: u16, @@ -200,7 +200,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U16Vec3 { x: u16, @@ -209,7 +209,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U16Vec4 { x: u16, @@ -220,7 +220,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U64Vec2 { x: u64, @@ -228,7 +228,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U64Vec3 { x: u64, @@ -237,7 +237,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U64Vec4 { x: u64, @@ -248,7 +248,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Vec2 { x: f32, @@ -256,7 +256,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Vec3 { x: f32, @@ -265,7 +265,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Vec3A { x: f32, @@ -274,7 +274,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Vec4 { x: f32, @@ -285,7 +285,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct BVec2 { x: bool, @@ -293,7 +293,7 @@ impl_reflect!( } ); 
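The glam registrations above now pass `Clone` to `#[reflect(...)]`, so the generated `reflect_clone` for these math types delegates to their `Clone` impls instead of being left unimplemented. A minimal caller-side sketch, assuming the `bevy_reflect` and `glam` APIs as modified in this patch (the function name is illustrative):

```rust
// Sketch only: exercises the `Clone` registration added for the glam types in this hunk.
use bevy_reflect::PartialReflect;
use glam::Vec3;

fn clone_vec3_via_reflection() {
    let original = Vec3::new(1.0, 2.0, 3.0);
    // With `#[reflect(Clone, ...)]`, `reflect_clone` delegates to `Vec3::clone` and returns
    // the concrete type (not a dynamic stand-in), so the downcast below succeeds.
    let cloned = original
        .reflect_clone()
        .expect("Vec3 registers `Clone` in this patch");
    assert_eq!(cloned.downcast_ref::<Vec3>(), Some(&original));
}
```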
impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct BVec3 { x: bool, @@ -302,7 +302,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct BVec4 { x: bool, @@ -313,7 +313,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DVec2 { x: f64, @@ -321,7 +321,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DVec3 { x: f64, @@ -330,7 +330,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DVec4 { x: f64, @@ -341,7 +341,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat2 { x_axis: Vec2, @@ -349,7 +349,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat3 { x_axis: Vec3, @@ -358,7 +358,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat3A { x_axis: Vec3A, @@ -367,7 +367,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat4 { x_axis: Vec4, @@ -378,7 +378,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DMat2 { x_axis: DVec2, @@ -386,7 +386,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DMat3 { x_axis: DVec3, @@ -395,7 +395,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DMat4 { x_axis: DVec4, @@ -406,7 +406,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Affine2 { matrix2: Mat2, @@ -414,7 +414,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Affine3A { matrix3: Mat3A, @@ -423,7 +423,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DAffine2 { matrix2: DMat2, @@ 
-431,7 +431,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DAffine3 { matrix3: DMat3, @@ -440,7 +440,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Quat { x: f32, @@ -450,7 +450,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DQuat { x: f64, @@ -461,7 +461,7 @@ impl_reflect!( ); reflect_enum!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] enum EulerRot { ZYX, @@ -491,8 +491,20 @@ reflect_enum!( } ); -impl_reflect_opaque!(::glam::BVec3A(Debug, Default, Deserialize, Serialize)); -impl_reflect_opaque!(::glam::BVec4A(Debug, Default, Deserialize, Serialize)); +impl_reflect_opaque!(::glam::BVec3A( + Clone, + Debug, + Default, + Deserialize, + Serialize +)); +impl_reflect_opaque!(::glam::BVec4A( + Clone, + Debug, + Default, + Deserialize, + Serialize +)); #[cfg(test)] mod tests { diff --git a/crates/bevy_reflect/src/impls/petgraph.rs b/crates/bevy_reflect/src/impls/petgraph.rs index 2264c9cb4b..ce2bf77e37 100644 --- a/crates/bevy_reflect/src/impls/petgraph.rs +++ b/crates/bevy_reflect/src/impls/petgraph.rs @@ -1,7 +1,10 @@ use crate::{impl_reflect_opaque, prelude::ReflectDefault, ReflectDeserialize, ReflectSerialize}; impl_reflect_opaque!(::petgraph::graph::NodeIndex( + Clone, Default, + PartialEq, + Hash, Serialize, Deserialize )); @@ -9,4 +12,4 @@ impl_reflect_opaque!(::petgraph::graph::DiGraph< N: ::core::clone::Clone, E: ::core::clone::Clone, Ix: ::petgraph::graph::IndexType ->()); +>(Clone)); diff --git a/crates/bevy_reflect/src/impls/smallvec.rs b/crates/bevy_reflect/src/impls/smallvec.rs index afb75aff8d..942bcbe83f 100644 --- a/crates/bevy_reflect/src/impls/smallvec.rs +++ b/crates/bevy_reflect/src/impls/smallvec.rs @@ -1,14 +1,14 @@ -use alloc::{boxed::Box, vec::Vec}; -use bevy_reflect_derive::impl_type_path; -use core::any::Any; -use smallvec::{Array as SmallArray, SmallVec}; - use crate::{ utility::GenericTypeInfoCell, ApplyError, FromReflect, FromType, Generics, GetTypeRegistration, List, ListInfo, ListIter, MaybeTyped, PartialReflect, Reflect, ReflectFromPtr, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypeParamInfo, TypePath, TypeRegistration, Typed, }; +use alloc::{borrow::Cow, boxed::Box, string::ToString, vec::Vec}; +use bevy_reflect::ReflectCloneError; +use bevy_reflect_derive::impl_type_path; +use core::any::Any; +use smallvec::{Array as SmallArray, SmallVec}; impl List for SmallVec where @@ -134,8 +134,20 @@ where ReflectOwned::List(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new( + self.iter() + .map(|value| { + value + .reflect_clone()? 
+ .take() + .map_err(|_| ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + }) + }) + .collect::>()?, + )) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { diff --git a/crates/bevy_reflect/src/impls/smol_str.rs b/crates/bevy_reflect/src/impls/smol_str.rs index e2c09b206b..d07a00cd6f 100644 --- a/crates/bevy_reflect/src/impls/smol_str.rs +++ b/crates/bevy_reflect/src/impls/smol_str.rs @@ -2,6 +2,7 @@ use crate::{std_traits::ReflectDefault, ReflectDeserialize, ReflectSerialize}; use bevy_reflect_derive::impl_reflect_opaque; impl_reflect_opaque!(::smol_str::SmolStr( + Clone, Debug, Hash, PartialEq, diff --git a/crates/bevy_reflect/src/impls/std.rs b/crates/bevy_reflect/src/impls/std.rs index 447e480f14..350527f910 100644 --- a/crates/bevy_reflect/src/impls/std.rs +++ b/crates/bevy_reflect/src/impls/std.rs @@ -9,9 +9,9 @@ use crate::{ reflect::impl_full_reflect, set_apply, set_partial_eq, set_try_apply, utility::{reflect_hasher, GenericTypeInfoCell, GenericTypePathCell, NonGenericTypeInfoCell}, - ApplyError, Array, ArrayInfo, ArrayIter, DynamicMap, DynamicSet, DynamicTypePath, FromReflect, - FromType, Generics, GetTypeRegistration, List, ListInfo, ListIter, Map, MapInfo, MapIter, - MaybeTyped, OpaqueInfo, PartialReflect, Reflect, ReflectDeserialize, ReflectFromPtr, + ApplyError, Array, ArrayInfo, ArrayIter, DynamicMap, DynamicTypePath, FromReflect, FromType, + Generics, GetTypeRegistration, List, ListInfo, ListIter, Map, MapInfo, MapIter, MaybeTyped, + OpaqueInfo, PartialReflect, Reflect, ReflectCloneError, ReflectDeserialize, ReflectFromPtr, ReflectFromReflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, ReflectSerialize, Set, SetInfo, TypeInfo, TypeParamInfo, TypePath, TypeRegistration, TypeRegistry, Typed, }; @@ -20,6 +20,7 @@ use alloc::{ boxed::Box, collections::VecDeque, format, + string::ToString, vec::Vec, }; use bevy_reflect_derive::{impl_reflect, impl_reflect_opaque}; @@ -34,6 +35,7 @@ use core::{ use std::path::Path; impl_reflect_opaque!(bool( + Clone, Debug, Hash, PartialEq, @@ -42,6 +44,43 @@ impl_reflect_opaque!(bool( Default )); impl_reflect_opaque!(char( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u8( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u16( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u32( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u64( + Clone, Debug, Hash, PartialEq, @@ -49,11 +88,8 @@ impl_reflect_opaque!(char( Deserialize, Default )); -impl_reflect_opaque!(u8(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(u16(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(u32(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(u64(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); impl_reflect_opaque!(u128( + Clone, Debug, Hash, PartialEq, @@ -62,6 +98,43 @@ impl_reflect_opaque!(u128( Default )); impl_reflect_opaque!(usize( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(i8( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(i16( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); 
+impl_reflect_opaque!(i32( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(i64( + Clone, Debug, Hash, PartialEq, @@ -69,11 +142,8 @@ impl_reflect_opaque!(usize( Deserialize, Default )); -impl_reflect_opaque!(i8(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(i16(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(i32(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(i64(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); impl_reflect_opaque!(i128( + Clone, Debug, Hash, PartialEq, @@ -82,6 +152,7 @@ impl_reflect_opaque!(i128( Default )); impl_reflect_opaque!(isize( + Clone, Debug, Hash, PartialEq, @@ -89,10 +160,25 @@ impl_reflect_opaque!(isize( Deserialize, Default )); -impl_reflect_opaque!(f32(Debug, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(f64(Debug, PartialEq, Serialize, Deserialize, Default)); +impl_reflect_opaque!(f32( + Clone, + Debug, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(f64( + Clone, + Debug, + PartialEq, + Serialize, + Deserialize, + Default +)); impl_type_path!(str); impl_reflect_opaque!(::alloc::string::String( + Clone, Debug, Hash, PartialEq, @@ -102,6 +188,7 @@ impl_reflect_opaque!(::alloc::string::String( )); #[cfg(feature = "std")] impl_reflect_opaque!(::std::path::PathBuf( + Clone, Debug, Hash, PartialEq, @@ -109,16 +196,17 @@ impl_reflect_opaque!(::std::path::PathBuf( Deserialize, Default )); -impl_reflect_opaque!(::core::any::TypeId(Debug, Hash, PartialEq,)); -impl_reflect_opaque!(::alloc::collections::BTreeSet()); -impl_reflect_opaque!(::core::ops::Range()); -impl_reflect_opaque!(::core::ops::RangeInclusive()); -impl_reflect_opaque!(::core::ops::RangeFrom()); -impl_reflect_opaque!(::core::ops::RangeTo()); -impl_reflect_opaque!(::core::ops::RangeToInclusive()); -impl_reflect_opaque!(::core::ops::RangeFull()); -impl_reflect_opaque!(::core::ops::Bound()); +impl_reflect_opaque!(::core::any::TypeId(Clone, Debug, Hash, PartialEq,)); +impl_reflect_opaque!(::alloc::collections::BTreeSet(Clone)); +impl_reflect_opaque!(::core::ops::Range(Clone)); +impl_reflect_opaque!(::core::ops::RangeInclusive(Clone)); +impl_reflect_opaque!(::core::ops::RangeFrom(Clone)); +impl_reflect_opaque!(::core::ops::RangeTo(Clone)); +impl_reflect_opaque!(::core::ops::RangeToInclusive(Clone)); +impl_reflect_opaque!(::core::ops::RangeFull(Clone)); +impl_reflect_opaque!(::core::ops::Bound(Clone)); impl_reflect_opaque!(::core::time::Duration( + Clone, Debug, Hash, PartialEq, @@ -126,10 +214,11 @@ impl_reflect_opaque!(::core::time::Duration( Deserialize, Default )); -impl_reflect_opaque!(::bevy_platform_support::time::Instant( - Debug, Hash, PartialEq +impl_reflect_opaque!(::bevy_platform::time::Instant( + Clone, Debug, Hash, PartialEq )); impl_reflect_opaque!(::core::num::NonZeroI128( + Clone, Debug, Hash, PartialEq, @@ -137,6 +226,7 @@ impl_reflect_opaque!(::core::num::NonZeroI128( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU128( + Clone, Debug, Hash, PartialEq, @@ -144,6 +234,7 @@ impl_reflect_opaque!(::core::num::NonZeroU128( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroIsize( + Clone, Debug, Hash, PartialEq, @@ -151,6 +242,7 @@ impl_reflect_opaque!(::core::num::NonZeroIsize( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroUsize( + Clone, Debug, Hash, PartialEq, @@ -158,6 +250,7 @@ impl_reflect_opaque!(::core::num::NonZeroUsize( Deserialize )); 
impl_reflect_opaque!(::core::num::NonZeroI64( + Clone, Debug, Hash, PartialEq, @@ -165,6 +258,7 @@ impl_reflect_opaque!(::core::num::NonZeroI64( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU64( + Clone, Debug, Hash, PartialEq, @@ -172,6 +266,7 @@ impl_reflect_opaque!(::core::num::NonZeroU64( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU32( + Clone, Debug, Hash, PartialEq, @@ -179,6 +274,7 @@ impl_reflect_opaque!(::core::num::NonZeroU32( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroI32( + Clone, Debug, Hash, PartialEq, @@ -186,6 +282,7 @@ impl_reflect_opaque!(::core::num::NonZeroI32( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroI16( + Clone, Debug, Hash, PartialEq, @@ -193,6 +290,7 @@ impl_reflect_opaque!(::core::num::NonZeroI16( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU16( + Clone, Debug, Hash, PartialEq, @@ -200,6 +298,7 @@ impl_reflect_opaque!(::core::num::NonZeroU16( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU8( + Clone, Debug, Hash, PartialEq, @@ -207,25 +306,22 @@ impl_reflect_opaque!(::core::num::NonZeroU8( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroI8( + Clone, Debug, Hash, PartialEq, Serialize, Deserialize )); -impl_reflect_opaque!(::core::num::Wrapping()); -impl_reflect_opaque!(::core::num::Saturating()); -impl_reflect_opaque!(::bevy_platform_support::sync::Arc); - -// We check despite `portable-atomic` being enabled, if the standard library `Arc` is -// also available, and implement Reflect for it. -#[cfg(all(feature = "portable-atomic", target_has_atomic = "ptr"))] -impl_reflect_opaque!(::alloc::sync::Arc); +impl_reflect_opaque!(::core::num::Wrapping(Clone)); +impl_reflect_opaque!(::core::num::Saturating(Clone)); +impl_reflect_opaque!(::bevy_platform::sync::Arc(Clone)); // `Serialize` and `Deserialize` only for platforms supported by serde: // https://github.com/serde-rs/serde/blob/3ffb86fc70efd3d329519e2dddfa306cc04f167c/serde/src/de/impls.rs#L1732 #[cfg(all(any(unix, windows), feature = "std"))] impl_reflect_opaque!(::std::ffi::OsString( + Clone, Debug, Hash, PartialEq, @@ -233,8 +329,8 @@ impl_reflect_opaque!(::std::ffi::OsString( Deserialize )); #[cfg(all(not(any(unix, windows)), feature = "std"))] -impl_reflect_opaque!(::std::ffi::OsString(Debug, Hash, PartialEq)); -impl_reflect_opaque!(::alloc::collections::BinaryHeap); +impl_reflect_opaque!(::std::ffi::OsString(Clone, Debug, Hash, PartialEq)); +impl_reflect_opaque!(::alloc::collections::BinaryHeap(Clone)); macro_rules! impl_reflect_for_atomic { ($ty:ty, $ordering:expr) => { @@ -312,10 +408,12 @@ macro_rules! impl_reflect_for_atomic { fn try_as_reflect_mut(&mut self) -> Option<&mut dyn Reflect> { Some(self) } + #[inline] - fn clone_value(&self) -> Box { - Box::new(<$ty>::new(self.load($ordering))) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(<$ty>::new(self.load($ordering)))) } + #[inline] fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { if let Some(value) = value.try_downcast_ref::() { @@ -523,8 +621,19 @@ macro_rules! 
impl_reflect_for_veclike { ReflectOwned::List(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new( + self.iter() + .map(|value| { + value.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + } + }) + }) + .collect::>()?, + )) } fn reflect_hash(&self) -> Option { @@ -613,7 +722,7 @@ macro_rules! impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get(&self, key: &dyn PartialReflect) -> Option<&dyn PartialReflect> { key.try_downcast_ref::() @@ -661,7 +770,7 @@ macro_rules! impl_reflect_for_hashmap { .collect() } - fn clone_dynamic(&self) -> DynamicMap { + fn to_dynamic_map(&self) -> DynamicMap { let mut dynamic_map = DynamicMap::default(); dynamic_map.set_represented_type(self.get_represented_type_info()); for (k, v) in self { @@ -671,7 +780,7 @@ macro_rules! impl_reflect_for_hashmap { k.reflect_type_path() ) }); - dynamic_map.insert_boxed(Box::new(key), v.clone_value()); + dynamic_map.insert_boxed(Box::new(key), v.to_dynamic()); } dynamic_map } @@ -713,7 +822,7 @@ macro_rules! impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get_represented_type_info(&self) -> Option<&'static TypeInfo> { Some(::type_info()) @@ -762,8 +871,25 @@ macro_rules! impl_reflect_for_hashmap { ReflectOwned::Map(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + let mut map = Self::with_capacity_and_hasher(self.len(), S::default()); + for (key, value) in self.iter() { + let key = key.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(key.reflect_type_path().to_string()), + } + })?; + let value = value.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + } + })?; + map.insert(key, value); + } + + Ok(Box::new(map)) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { @@ -784,14 +910,14 @@ macro_rules! impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, ); impl Typed for $ty where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn type_info() -> &'static TypeInfo { static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); @@ -810,7 +936,7 @@ macro_rules! 
impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync + Default, + S: TypePath + BuildHasher + Default + Send + Sync + Default, { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::(); @@ -864,10 +990,10 @@ crate::func::macros::impl_function_traits!(::std::collections::HashMap; > ); -impl_reflect_for_hashmap!(bevy_platform_support::collections::HashMap); -impl_type_path!(::bevy_platform_support::collections::HashMap); +impl_reflect_for_hashmap!(bevy_platform::collections::HashMap); +impl_type_path!(::bevy_platform::collections::HashMap); #[cfg(feature = "functions")] -crate::func::macros::impl_function_traits!(::bevy_platform_support::collections::HashMap; +crate::func::macros::impl_function_traits!(::bevy_platform::collections::HashMap; < K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, @@ -880,7 +1006,7 @@ macro_rules! impl_reflect_for_hashset { impl Set for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get(&self, value: &dyn PartialReflect) -> Option<&dyn PartialReflect> { value @@ -904,15 +1030,6 @@ macro_rules! impl_reflect_for_hashset { .collect() } - fn clone_dynamic(&self) -> DynamicSet { - let mut dynamic_set = DynamicSet::default(); - dynamic_set.set_represented_type(self.get_represented_type_info()); - for v in self { - dynamic_set.insert_boxed(v.clone_value()); - } - dynamic_set - } - fn insert_boxed(&mut self, value: Box) -> bool { let value = V::take_from_reflect(value).unwrap_or_else(|value| { panic!( @@ -949,7 +1066,7 @@ macro_rules! impl_reflect_for_hashset { impl PartialReflect for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get_represented_type_info(&self) -> Option<&'static TypeInfo> { Some(::type_info()) @@ -1007,8 +1124,19 @@ macro_rules! impl_reflect_for_hashset { ReflectOwned::Set(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + let mut set = Self::with_capacity_and_hasher(self.len(), S::default()); + for value in self.iter() { + let value = value.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + } + })?; + set.insert(value); + } + + Ok(Box::new(set)) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { @@ -1019,7 +1147,7 @@ macro_rules! impl_reflect_for_hashset { impl Typed for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn type_info() -> &'static TypeInfo { static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); @@ -1036,7 +1164,7 @@ macro_rules! 
impl_reflect_for_hashset { impl GetTypeRegistration for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync + Default, + S: TypePath + BuildHasher + Default + Send + Sync + Default, { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::(); @@ -1054,7 +1182,7 @@ macro_rules! impl_reflect_for_hashset { for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, ); impl FromReflect for $ty @@ -1078,8 +1206,16 @@ macro_rules! impl_reflect_for_hashset { }; } -impl_type_path!(::bevy_platform_support::hash::NoOpHash); -impl_type_path!(::bevy_platform_support::hash::FixedHasher); +impl_type_path!(::bevy_platform::hash::NoOpHash); +impl_type_path!(::bevy_platform::hash::FixedHasher); +impl_reflect_opaque!(::core::net::SocketAddr( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize +)); #[cfg(feature = "std")] impl_reflect_for_hashset!(::std::collections::HashSet); @@ -1093,10 +1229,10 @@ crate::func::macros::impl_function_traits!(::std::collections::HashSet; > ); -impl_reflect_for_hashset!(::bevy_platform_support::collections::HashSet); -impl_type_path!(::bevy_platform_support::collections::HashSet); +impl_reflect_for_hashset!(::bevy_platform::collections::HashSet); +impl_type_path!(::bevy_platform::collections::HashSet); #[cfg(feature = "functions")] -crate::func::macros::impl_function_traits!(::bevy_platform_support::collections::HashSet; +crate::func::macros::impl_function_traits!(::bevy_platform::collections::HashSet; < V: Hash + Eq + FromReflect + TypePath + GetTypeRegistration, S: TypePath + BuildHasher + Default + Send + Sync @@ -1167,7 +1303,7 @@ where k.reflect_type_path() ) }); - dynamic_map.insert_boxed(Box::new(key), v.clone_value()); + dynamic_map.insert_boxed(Box::new(key), v.to_dynamic()); } dynamic_map } @@ -1253,8 +1389,28 @@ where ReflectOwned::Map(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + let mut map = Self::new(); + for (key, value) in self.iter() { + let key = + key.reflect_clone()? + .take() + .map_err(|_| ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(key.reflect_type_path().to_string()), + })?; + let value = + value + .reflect_clone()? 
+ .take() + .map_err(|_| ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + })?; + map.insert(key, value); + } + + Ok(Box::new(map)) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { @@ -1418,11 +1574,6 @@ impl P ReflectOwned::Array(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { crate::array_hash(self) @@ -1617,8 +1768,8 @@ impl PartialReflect for Cow<'static, str> { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(self.clone())) } fn reflect_hash(&self) -> Option { @@ -1805,8 +1956,8 @@ impl Parti ReflectOwned::List(self) } - fn clone_value(&self) -> Box { - Box::new(List::clone_dynamic(self)) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(self.clone())) } fn reflect_hash(&self) -> Option { @@ -1914,8 +2065,8 @@ impl PartialReflect for &'static str { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(*self) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(*self)) } fn reflect_hash(&self) -> Option { @@ -2053,8 +2204,8 @@ impl PartialReflect for &'static Path { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(*self) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(*self)) } fn reflect_hash(&self) -> Option { @@ -2192,8 +2343,8 @@ impl PartialReflect for Cow<'static, Path> { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(self.clone())) } fn reflect_hash(&self) -> Option { @@ -2350,8 +2501,8 @@ impl PartialReflect for &'static Location<'static> { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(*self) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(*self)) } fn reflect_hash(&self) -> Option { @@ -2445,8 +2596,8 @@ mod tests { Typed, VariantInfo, VariantType, }; use alloc::{collections::BTreeMap, string::String, vec}; - use bevy_platform_support::collections::HashMap; - use bevy_platform_support::time::Instant; + use bevy_platform::collections::HashMap; + use bevy_platform::time::Instant; use core::{ f32::consts::{PI, TAU}, time::Duration, diff --git a/crates/bevy_reflect/src/impls/uuid.rs b/crates/bevy_reflect/src/impls/uuid.rs index 56beedd41a..7385304e28 100644 --- a/crates/bevy_reflect/src/impls/uuid.rs +++ b/crates/bevy_reflect/src/impls/uuid.rs @@ -5,6 +5,7 @@ impl_reflect_opaque!(::uuid::Uuid( Serialize, Deserialize, Default, + Clone, Debug, PartialEq, Hash diff --git a/crates/bevy_reflect/src/impls/wgpu_types.rs b/crates/bevy_reflect/src/impls/wgpu_types.rs index b4a1750ba1..734eace938 100644 --- a/crates/bevy_reflect/src/impls/wgpu_types.rs +++ b/crates/bevy_reflect/src/impls/wgpu_types.rs @@ -1,6 +1,7 @@ use crate::{impl_reflect_opaque, ReflectDeserialize, ReflectSerialize}; impl_reflect_opaque!(::wgpu_types::TextureFormat( + Clone, Debug, Hash, PartialEq, diff --git a/crates/bevy_reflect/src/lib.rs b/crates/bevy_reflect/src/lib.rs index 2845c716c5..58e9b8714f 100644 --- a/crates/bevy_reflect/src/lib.rs +++ b/crates/bevy_reflect/src/lib.rs @@ -112,7 +112,7 @@ //! //! Additionally, using the derive macro on enums requires a third condition to be met: //! 
* All fields and sub-elements must implement [`FromReflect`]— -//! another important reflection trait discussed in a later section. +//! another important reflection trait discussed in a later section. //! //! # The Reflection Subtraits //! @@ -204,8 +204,8 @@ //! //! They are most commonly used as "proxies" for other types, //! where they contain the same data as— and therefore, represent— a concrete type. -//! The [`PartialReflect::clone_value`] method will return a dynamic type for all non-opaque types, -//! allowing all types to essentially be "cloned". +//! The [`PartialReflect::to_dynamic`] method will return a dynamic type for all non-opaque types, +//! allowing all types to essentially be "cloned" into a dynamic type. //! And since dynamic types themselves implement [`PartialReflect`], //! we may pass them around just like most other reflected types. //! @@ -219,9 +219,9 @@ //! foo: 123 //! }); //! -//! // `cloned` will be a `DynamicStruct` representing a `MyStruct` -//! let cloned: Box = original.clone_value(); -//! assert!(cloned.represents::()); +//! // `dynamic` will be a `DynamicStruct` representing a `MyStruct` +//! let dynamic: Box = original.to_dynamic(); +//! assert!(dynamic.represents::()); //! ``` //! //! ## Patching @@ -253,8 +253,8 @@ //! foo: 123 //! }); //! -//! let cloned: Box = original.clone_value(); -//! let value = cloned.try_take::().unwrap(); // PANIC! +//! let dynamic: Box = original.to_dynamic(); +//! let value = dynamic.try_take::().unwrap(); // PANIC! //! ``` //! //! To resolve this issue, we'll need to convert the dynamic type to the concrete one. @@ -278,8 +278,8 @@ //! foo: 123 //! }); //! -//! let cloned: Box = original.clone_value(); -//! let value = ::from_reflect(&*cloned).unwrap(); // OK! +//! let dynamic: Box = original.to_dynamic(); +//! let value = ::from_reflect(&*dynamic).unwrap(); // OK! //! ``` //! //! When deriving, all active fields and sub-elements must also implement `FromReflect`. @@ -568,6 +568,7 @@ extern crate alloc; extern crate self as bevy_reflect; mod array; +mod error; mod fields; mod from_reflect; #[cfg(feature = "functions")] @@ -633,6 +634,7 @@ pub mod prelude { pub use array::*; pub use enums::*; +pub use error::*; pub use fields::*; pub use from_reflect::*; pub use generics::*; @@ -732,7 +734,7 @@ mod tests { vec, vec::Vec, }; - use bevy_platform_support::collections::HashMap; + use bevy_platform::collections::HashMap; use core::{ any::TypeId, fmt::{Debug, Formatter}, @@ -943,7 +945,7 @@ mod tests { let foo = Foo { a: 1 }; assert!(foo.reflect_hash().is_some()); - let dynamic = foo.clone_dynamic(); + let dynamic = foo.to_dynamic_struct(); let mut map = DynamicMap::default(); map.insert(dynamic, 11u32); @@ -986,6 +988,331 @@ mod tests { assert_eq!(values, vec![1]); } + /// This test ensures that we are able to reflect generic types with one or more type parameters. + /// + /// When there is an `Add` implementation for `String`, the compiler isn't able to infer the correct + /// type to deref to. + /// If we don't append the strings in the `TypePath` derive correctly (i.e. explicitly specifying the type), + /// we'll get a compilation error saying that "`&String` cannot be added to `String`". + /// + /// So this test just ensures that we do do that correctly. 
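The doc changes above draw a line between the two cloning paths: `to_dynamic` still produces a dynamic proxy that must go back through `FromReflect`, while the new `reflect_clone` yields a concrete value directly. A sketch of the difference, assuming the APIs introduced in this diff; `Stats` is a hypothetical type used only for illustration:

```rust
use bevy_reflect::{FromReflect, PartialReflect, Reflect};

// Hypothetical type, used only to contrast the two cloning paths.
#[derive(Reflect, Debug, PartialEq)]
struct Stats {
    value: u32,
}

let original = Stats { value: 123 };

// `to_dynamic` yields a `DynamicStruct` proxy; recovering the concrete type
// still goes through `FromReflect`.
let dynamic = original.to_dynamic();
assert!(dynamic.represents::<Stats>());
let roundtrip = Stats::from_reflect(&*dynamic).unwrap();

// `reflect_clone` skips the proxy and clones directly into a `Stats`.
let concrete = original.reflect_clone().unwrap().take::<Stats>().unwrap();
assert_eq!(roundtrip, concrete);
```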
+ /// + /// This problem is a known issue and is unexpectedly expected behavior: + /// - + /// - + /// - + #[test] + fn should_reflect_generic() { + struct FakeString {} + + // This implementation confuses the compiler when trying to add a `&String` to a `String` + impl core::ops::Add for String { + type Output = Self; + fn add(self, _rhs: FakeString) -> Self::Output { + unreachable!() + } + } + + #[derive(Reflect)] + struct Foo(A); + + #[derive(Reflect)] + struct Bar(A, B); + + #[derive(Reflect)] + struct Baz(A, B, C); + } + + #[test] + fn should_reflect_clone() { + // Struct + #[derive(Reflect, Debug, PartialEq)] + struct Foo(usize); + + let value = Foo(123); + let clone = value.reflect_clone().expect("should reflect_clone struct"); + assert_eq!(value, clone.take::().unwrap()); + + // Tuple + let foo = (123, 4.56); + let clone = foo.reflect_clone().expect("should reflect_clone tuple"); + assert_eq!(foo, clone.take::<(u32, f32)>().unwrap()); + } + + #[test] + fn should_reflect_clone_generic_type() { + #[derive(Reflect, Debug, PartialEq)] + struct Foo(T, #[reflect(ignore, clone)] PhantomData); + #[derive(TypePath, Debug, PartialEq)] + struct Bar; + + // `usize` will be cloned via `Reflect::reflect_clone` + // `PhantomData` will be cloned via `Clone::clone` + let value = Foo::(123, PhantomData); + let clone = value + .reflect_clone() + .expect("should reflect_clone generic struct"); + assert_eq!(value, clone.take::>().unwrap()); + } + + #[test] + fn should_reflect_clone_with_clone() { + // A custom clone function to verify that the `#[reflect(Clone)]` container attribute + // takes precedence over the `#[reflect(clone)]` field attribute. + #[expect( + dead_code, + reason = "if things are working correctly, this function should never be called" + )] + fn custom_clone(_value: &usize) -> usize { + panic!("should not be called"); + } + + // Tuple Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + #[reflect(Clone)] + struct Foo(#[reflect(clone = "custom_clone")] usize); + + let value = Foo(123); + let clone = value + .reflect_clone() + .expect("should reflect_clone tuple struct"); + assert_eq!(value, clone.take::().unwrap()); + + // Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + #[reflect(Clone)] + struct Bar { + #[reflect(clone = "custom_clone")] + value: usize, + } + + let value = Bar { value: 123 }; + let clone = value.reflect_clone().expect("should reflect_clone struct"); + assert_eq!(value, clone.take::().unwrap()); + + // Enum + #[derive(Reflect, Clone, Debug, PartialEq)] + #[reflect(Clone)] + enum Baz { + Unit, + Tuple(#[reflect(clone = "custom_clone")] usize), + Struct { + #[reflect(clone = "custom_clone")] + value: usize, + }, + } + + let value = Baz::Unit; + let clone = value + .reflect_clone() + .expect("should reflect_clone unit variant"); + assert_eq!(value, clone.take::().unwrap()); + + let value = Baz::Tuple(123); + let clone = value + .reflect_clone() + .expect("should reflect_clone tuple variant"); + assert_eq!(value, clone.take::().unwrap()); + + let value = Baz::Struct { value: 123 }; + let clone = value + .reflect_clone() + .expect("should reflect_clone struct variant"); + assert_eq!(value, clone.take::().unwrap()); + } + + #[test] + fn should_custom_reflect_clone() { + #[derive(Reflect, Debug, PartialEq)] + #[reflect(Clone(clone_foo))] + struct Foo(usize); + + fn clone_foo(foo: &Foo) -> Foo { + Foo(foo.0 + 198) + } + + let foo = Foo(123); + let clone = foo.reflect_clone().unwrap(); + assert_eq!(Foo(321), clone.take::().unwrap()); + } + + #[test] + fn 
should_not_clone_ignored_fields() { + // Tuple Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + struct Foo(#[reflect(ignore)] usize); + + let foo = Foo(123); + let clone = foo.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Unnamed(0), + variant: None, + container_type_path: Cow::Borrowed(Foo::type_path()), + } + ); + + // Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + struct Bar { + #[reflect(ignore)] + value: usize, + } + + let bar = Bar { value: 123 }; + let clone = bar.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Named(Cow::Borrowed("value")), + variant: None, + container_type_path: Cow::Borrowed(Bar::type_path()), + } + ); + + // Enum + #[derive(Reflect, Clone, Debug, PartialEq)] + enum Baz { + Tuple(#[reflect(ignore)] usize), + Struct { + #[reflect(ignore)] + value: usize, + }, + } + + let baz = Baz::Tuple(123); + let clone = baz.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Unnamed(0), + variant: Some(Cow::Borrowed("Tuple")), + container_type_path: Cow::Borrowed(Baz::type_path()), + } + ); + + let baz = Baz::Struct { value: 123 }; + let clone = baz.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Named(Cow::Borrowed("value")), + variant: Some(Cow::Borrowed("Struct")), + container_type_path: Cow::Borrowed(Baz::type_path()), + } + ); + } + + #[test] + fn should_clone_ignored_fields_with_clone_attributes() { + #[derive(Reflect, Clone, Debug, PartialEq)] + struct Foo(#[reflect(ignore, clone)] usize); + + let foo = Foo(123); + let clone = foo.reflect_clone().unwrap(); + assert_eq!(Foo(123), clone.take::().unwrap()); + + #[derive(Reflect, Clone, Debug, PartialEq)] + struct Bar(#[reflect(ignore, clone = "clone_usize")] usize); + + fn clone_usize(this: &usize) -> usize { + *this + 198 + } + + let bar = Bar(123); + let clone = bar.reflect_clone().unwrap(); + assert_eq!(Bar(321), clone.take::().unwrap()); + } + + #[test] + fn should_composite_reflect_clone() { + #[derive(Reflect, Debug, PartialEq)] + enum MyEnum { + Unit, + Tuple( + Foo, + #[reflect(ignore, clone)] Bar, + #[reflect(clone = "clone_baz")] Baz, + ), + Struct { + foo: Foo, + #[reflect(ignore, clone)] + bar: Bar, + #[reflect(clone = "clone_baz")] + baz: Baz, + }, + } + + #[derive(Reflect, Debug, PartialEq)] + struct Foo { + #[reflect(clone = "clone_bar")] + bar: Bar, + baz: Baz, + } + + #[derive(Reflect, Default, Clone, Debug, PartialEq)] + #[reflect(Clone)] + struct Bar(String); + + #[derive(Reflect, Debug, PartialEq)] + struct Baz(String); + + fn clone_bar(bar: &Bar) -> Bar { + Bar(format!("{}!", bar.0)) + } + + fn clone_baz(baz: &Baz) -> Baz { + Baz(format!("{}!", baz.0)) + } + + let my_enum = MyEnum::Unit; + let clone = my_enum.reflect_clone().unwrap(); + assert_eq!(MyEnum::Unit, clone.take::().unwrap()); + + let my_enum = MyEnum::Tuple( + Foo { + bar: Bar("bar".to_string()), + baz: Baz("baz".to_string()), + }, + Bar("bar".to_string()), + Baz("baz".to_string()), + ); + let clone = my_enum.reflect_clone().unwrap(); + assert_eq!( + MyEnum::Tuple( + Foo { + bar: Bar("bar!".to_string()), + baz: Baz("baz".to_string()), + }, + Bar("bar".to_string()), + Baz("baz!".to_string()), + ), + clone.take::().unwrap() + ); + + let my_enum = MyEnum::Struct { + foo: Foo { + bar: Bar("bar".to_string()), + baz: Baz("baz".to_string()), + }, + bar: Bar("bar".to_string()), + baz: 
Baz("baz".to_string()), + }; + let clone = my_enum.reflect_clone().unwrap(); + assert_eq!( + MyEnum::Struct { + foo: Foo { + bar: Bar("bar!".to_string()), + baz: Baz("baz".to_string()), + }, + bar: Bar("bar".to_string()), + baz: Baz("baz!".to_string()), + }, + clone.take::().unwrap() + ); + } + #[test] fn should_call_from_reflect_dynamically() { #[derive(Reflect)] @@ -1206,7 +1533,7 @@ mod tests { list.push(3isize); list.push(4isize); list.push(5isize); - foo_patch.insert("c", list.clone_dynamic()); + foo_patch.insert("c", list.to_dynamic_list()); let mut map = DynamicMap::default(); map.insert(2usize, 3i8); @@ -1215,7 +1542,7 @@ mod tests { let mut bar_patch = DynamicStruct::default(); bar_patch.insert("x", 2u32); - foo_patch.insert("e", bar_patch.clone_dynamic()); + foo_patch.insert("e", bar_patch.to_dynamic_struct()); let mut tuple = DynamicTuple::default(); tuple.insert(2i32); @@ -1542,22 +1869,22 @@ mod tests { #[test] fn not_dynamic_names() { let list = Vec::::new(); - let dyn_list = list.clone_dynamic(); + let dyn_list = list.to_dynamic_list(); assert_ne!(dyn_list.reflect_type_path(), Vec::::type_path()); let array = [b'0'; 4]; - let dyn_array = array.clone_dynamic(); + let dyn_array = array.to_dynamic_array(); assert_ne!(dyn_array.reflect_type_path(), <[u8; 4]>::type_path()); let map = HashMap::::default(); - let dyn_map = map.clone_dynamic(); + let dyn_map = map.to_dynamic_map(); assert_ne!( dyn_map.reflect_type_path(), HashMap::::type_path() ); let tuple = (0usize, "1".to_string(), 2.0f32); - let mut dyn_tuple = tuple.clone_dynamic(); + let mut dyn_tuple = tuple.to_dynamic_tuple(); dyn_tuple.insert::(3); assert_ne!( dyn_tuple.reflect_type_path(), @@ -1569,13 +1896,13 @@ mod tests { a: usize, } let struct_ = TestStruct { a: 0 }; - let dyn_struct = struct_.clone_dynamic(); + let dyn_struct = struct_.to_dynamic_struct(); assert_ne!(dyn_struct.reflect_type_path(), TestStruct::type_path()); #[derive(Reflect)] struct TestTupleStruct(usize); let tuple_struct = TestTupleStruct(0); - let dyn_tuple_struct = tuple_struct.clone_dynamic(); + let dyn_tuple_struct = tuple_struct.to_dynamic_tuple_struct(); assert_ne!( dyn_tuple_struct.reflect_type_path(), TestTupleStruct::type_path() @@ -1960,7 +2287,7 @@ mod tests { #[test] fn should_permit_valid_represented_type_for_dynamic() { let type_info = <[i32; 2] as Typed>::type_info(); - let mut dynamic_array = [123; 2].clone_dynamic(); + let mut dynamic_array = [123; 2].to_dynamic_array(); dynamic_array.set_represented_type(Some(type_info)); } @@ -1968,7 +2295,7 @@ mod tests { #[should_panic(expected = "expected TypeInfo::Array but received")] fn should_prohibit_invalid_represented_type_for_dynamic() { let type_info = <(i32, i32) as Typed>::type_info(); - let mut dynamic_array = [123; 2].clone_dynamic(); + let mut dynamic_array = [123; 2].to_dynamic_array(); dynamic_array.set_represented_type(Some(type_info)); } @@ -2351,6 +2678,14 @@ bevy_reflect::tests::Test { assert_not_impl_all!(Foo: Reflect); } + #[test] + fn should_allow_empty_enums() { + #[derive(Reflect)] + enum Empty {} + + assert_impl_all!(Empty: Reflect); + } + #[test] fn recursive_typed_storage_does_not_hang() { #[derive(Reflect)] @@ -2508,7 +2843,7 @@ bevy_reflect::tests::Test { map, value: 12, } - .clone_dynamic(); + .to_dynamic_struct(); // test unknown DynamicStruct let mut test_unknown_struct = DynamicStruct::default(); diff --git a/crates/bevy_reflect/src/list.rs b/crates/bevy_reflect/src/list.rs index e0f019c4b2..2e1c085676 100644 --- a/crates/bevy_reflect/src/list.rs +++ 
b/crates/bevy_reflect/src/list.rs @@ -104,10 +104,16 @@ pub trait List: PartialReflect { fn drain(&mut self) -> Vec>; /// Clones the list, producing a [`DynamicList`]. + #[deprecated(since = "0.16.0", note = "use `to_dynamic_list` instead")] fn clone_dynamic(&self) -> DynamicList { + self.to_dynamic_list() + } + + /// Creates a new [`DynamicList`] from this list. + fn to_dynamic_list(&self) -> DynamicList { DynamicList { represented_type: self.get_represented_type_info(), - values: self.iter().map(PartialReflect::clone_value).collect(), + values: self.iter().map(PartialReflect::to_dynamic).collect(), } } @@ -246,17 +252,6 @@ impl List for DynamicList { fn drain(&mut self) -> Vec> { self.values.drain(..).collect() } - - fn clone_dynamic(&self) -> DynamicList { - DynamicList { - represented_type: self.represented_type, - values: self - .values - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicList { @@ -320,11 +315,6 @@ impl PartialReflect for DynamicList { ReflectOwned::List(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { list_hash(self) @@ -470,7 +460,7 @@ pub fn list_try_apply(a: &mut L, b: &dyn PartialReflect) -> Result<(), v.try_apply(value)?; } } else { - List::push(a, value.clone_value()); + List::push(a, value.to_dynamic()); } } diff --git a/crates/bevy_reflect/src/map.rs b/crates/bevy_reflect/src/map.rs index f71689d598..0a1c0b689a 100644 --- a/crates/bevy_reflect/src/map.rs +++ b/crates/bevy_reflect/src/map.rs @@ -1,6 +1,6 @@ use core::fmt::{Debug, Formatter}; -use bevy_platform_support::collections::HashTable; +use bevy_platform::collections::HashTable; use bevy_reflect_derive::impl_type_path; use crate::{ @@ -82,7 +82,20 @@ pub trait Map: PartialReflect { fn drain(&mut self) -> Vec<(Box, Box)>; /// Clones the map, producing a [`DynamicMap`]. - fn clone_dynamic(&self) -> DynamicMap; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_map` instead")] + fn clone_dynamic(&self) -> DynamicMap { + self.to_dynamic_map() + } + + /// Creates a new [`DynamicMap`] from this map. + fn to_dynamic_map(&self) -> DynamicMap { + let mut map = DynamicMap::default(); + map.set_represented_type(self.get_represented_type_info()); + for (key, value) in self.iter() { + map.insert_boxed(key.to_dynamic(), value.to_dynamic()); + } + map + } /// Inserts a key-value pair into the map. /// @@ -206,7 +219,6 @@ macro_rules! 
hash_error { ), } } - .as_str() }} } @@ -244,7 +256,7 @@ impl DynamicMap { } fn internal_hash(value: &dyn PartialReflect) -> u64 { - value.reflect_hash().expect(hash_error!(value)) + value.reflect_hash().expect(&hash_error!(value)) } fn internal_eq<'a>( @@ -303,18 +315,6 @@ impl Map for DynamicMap { self.values.drain(..).collect() } - fn clone_dynamic(&self) -> DynamicMap { - DynamicMap { - represented_type: self.represented_type, - values: self - .values - .iter() - .map(|(key, value)| (key.clone_value(), value.clone_value())) - .collect(), - indices: self.indices.clone(), - } - } - fn insert_boxed( &mut self, key: Box, @@ -432,10 +432,6 @@ impl PartialReflect for DynamicMap { ReflectOwned::Map(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { map_partial_eq(self, value) } @@ -621,7 +617,7 @@ pub fn map_try_apply(a: &mut M, b: &dyn PartialReflect) -> Result<(), Ap if let Some(a_value) = a.get_mut(key) { a_value.try_apply(b_value)?; } else { - a.insert_boxed(key.clone_value(), b_value.clone_value()); + a.insert_boxed(key.to_dynamic(), b_value.to_dynamic()); } } diff --git a/crates/bevy_reflect/src/reflect.rs b/crates/bevy_reflect/src/reflect.rs index 0898ff3630..4918179e12 100644 --- a/crates/bevy_reflect/src/reflect.rs +++ b/crates/bevy_reflect/src/reflect.rs @@ -1,9 +1,11 @@ use crate::{ array_debug, enum_debug, list_debug, map_debug, set_debug, struct_debug, tuple_debug, - tuple_struct_debug, DynamicTypePath, DynamicTyped, OpaqueInfo, ReflectKind, + tuple_struct_debug, DynamicTypePath, DynamicTyped, OpaqueInfo, ReflectCloneError, ReflectKind, ReflectKindMismatchError, ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypePath, Typed, }; +use alloc::borrow::Cow; use alloc::boxed::Box; +use alloc::string::ToString; use core::{ any::{Any, TypeId}, fmt::Debug, @@ -216,20 +218,116 @@ where /// See [`ReflectOwned`]. fn reflect_owned(self: Box) -> ReflectOwned; - /// Clones the value as a `Reflect` trait object. + /// Clones `Self` into its dynamic representation. /// - /// When deriving `Reflect` for a struct, tuple struct or enum, the value is - /// cloned via [`Struct::clone_dynamic`], [`TupleStruct::clone_dynamic`], - /// or [`Enum::clone_dynamic`], respectively. - /// Implementors of other `Reflect` subtraits (e.g. [`List`], [`Map`]) should - /// use those subtraits' respective `clone_dynamic` methods. + /// For value types or types marked with `#[reflect_value]`, + /// this will simply return a clone of `Self`. + /// + /// Otherwise the associated dynamic type will be returned. + /// + /// For example, a [`List`] type will invoke [`List::clone_dynamic`], returning [`DynamicList`]. + /// A [`Struct`] type will invoke [`Struct::clone_dynamic`], returning [`DynamicStruct`]. + /// And so on. + /// + /// If the dynamic behavior is not desired, a concrete clone can be obtained using [`PartialReflect::reflect_clone`]. 
+ /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::{PartialReflect}; + /// let value = (1, true, 3.14); + /// let cloned = value.clone_value(); + /// assert!(cloned.is_dynamic()) + /// ``` /// - /// [`Struct::clone_dynamic`]: crate::Struct::clone_dynamic - /// [`TupleStruct::clone_dynamic`]: crate::TupleStruct::clone_dynamic - /// [`Enum::clone_dynamic`]: crate::Enum::clone_dynamic /// [`List`]: crate::List - /// [`Map`]: crate::Map - fn clone_value(&self) -> Box; + /// [`List::clone_dynamic`]: crate::List::clone_dynamic + /// [`DynamicList`]: crate::DynamicList + /// [`Struct`]: crate::Struct + /// [`Struct::clone_dynamic`]: crate::Struct::clone_dynamic + /// [`DynamicStruct`]: crate::DynamicStruct + #[deprecated( + since = "0.16.0", + note = "to clone reflected values, prefer using `reflect_clone`. To convert reflected values to dynamic ones, use `to_dynamic`." + )] + fn clone_value(&self) -> Box { + self.to_dynamic() + } + + /// Converts this reflected value into its dynamic representation based on its [kind]. + /// + /// For example, a [`List`] type will internally invoke [`List::to_dynamic_list`], returning [`DynamicList`]. + /// A [`Struct`] type will invoke [`Struct::to_dynamic_struct`], returning [`DynamicStruct`]. + /// And so on. + /// + /// If the [kind] is [opaque], then the value will attempt to be cloned directly via [`reflect_clone`], + /// since opaque types do not have any standard dynamic representation. + /// + /// To attempt to clone the value directly such that it returns a concrete instance of this type, + /// use [`reflect_clone`]. + /// + /// # Panics + /// + /// This method will panic if the [kind] is [opaque] and the call to [`reflect_clone`] fails. + /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::{PartialReflect}; + /// let value = (1, true, 3.14); + /// let dynamic_value = value.to_dynamic(); + /// assert!(dynamic_value.is_dynamic()) + /// ``` + /// + /// [kind]: PartialReflect::reflect_kind + /// [`List`]: crate::List + /// [`List::to_dynamic_list`]: crate::List::to_dynamic_list + /// [`DynamicList`]: crate::DynamicList + /// [`Struct`]: crate::Struct + /// [`Struct::to_dynamic_struct`]: crate::Struct::to_dynamic_struct + /// [`DynamicStruct`]: crate::DynamicStruct + /// [opaque]: crate::ReflectKind::Opaque + /// [`reflect_clone`]: PartialReflect::reflect_clone + fn to_dynamic(&self) -> Box { + match self.reflect_ref() { + ReflectRef::Struct(dyn_struct) => Box::new(dyn_struct.to_dynamic_struct()), + ReflectRef::TupleStruct(dyn_tuple_struct) => { + Box::new(dyn_tuple_struct.to_dynamic_tuple_struct()) + } + ReflectRef::Tuple(dyn_tuple) => Box::new(dyn_tuple.to_dynamic_tuple()), + ReflectRef::List(dyn_list) => Box::new(dyn_list.to_dynamic_list()), + ReflectRef::Array(dyn_array) => Box::new(dyn_array.to_dynamic_array()), + ReflectRef::Map(dyn_map) => Box::new(dyn_map.to_dynamic_map()), + ReflectRef::Set(dyn_set) => Box::new(dyn_set.to_dynamic_set()), + ReflectRef::Enum(dyn_enum) => Box::new(dyn_enum.to_dynamic_enum()), + #[cfg(feature = "functions")] + ReflectRef::Function(dyn_function) => Box::new(dyn_function.to_dynamic_function()), + ReflectRef::Opaque(value) => value.reflect_clone().unwrap().into_partial_reflect(), + } + } + + /// Attempts to clone `Self` using reflection. + /// + /// Unlike [`to_dynamic`], which generally returns a dynamic representation of `Self`, + /// this method attempts create a clone of `Self` directly, if possible. + /// + /// If the clone cannot be performed, an appropriate [`ReflectCloneError`] is returned. 
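Because the trait-level default shown in this file returns `ReflectCloneError::NotImplemented`, and derived impls can fail with `FieldNotCloneable` for `#[reflect(ignore)]` fields (exercised by the tests earlier in this diff), callers should treat `reflect_clone` as fallible. A sketch of handling the error; the `Settings` type is hypothetical:

```rust
use bevy_reflect::{PartialReflect, Reflect, ReflectCloneError};

// Hypothetical type: the ignored field makes the derived `reflect_clone` fail
// unless the field is also marked `#[reflect(clone)]`.
#[derive(Reflect)]
struct Settings {
    #[reflect(ignore)]
    cache: usize,
}

let settings = Settings { cache: 0 };
match settings.reflect_clone() {
    Ok(_) => println!("cloned"),
    Err(ReflectCloneError::FieldNotCloneable { field, .. }) => {
        println!("field {field:?} is not cloneable");
    }
    Err(other) => println!("reflect_clone failed: {other:?}"),
}
```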
+ /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::PartialReflect; + /// let value = (1, true, 3.14); + /// let cloned = value.reflect_clone().unwrap(); + /// assert!(cloned.is::<(i32, bool, f64)>()) + /// ``` + /// + /// [`to_dynamic`]: PartialReflect::to_dynamic + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Err(ReflectCloneError::NotImplemented { + type_path: Cow::Owned(self.reflect_type_path().to_string()), + }) + } /// Returns a hash of the value (which includes the type). /// diff --git a/crates/bevy_reflect/src/serde/de/mod.rs b/crates/bevy_reflect/src/serde/de/mod.rs index c6907b56ec..e82b60bcee 100644 --- a/crates/bevy_reflect/src/serde/de/mod.rs +++ b/crates/bevy_reflect/src/serde/de/mod.rs @@ -30,12 +30,11 @@ mod tests { vec, vec::Vec, }; - use bincode::Options; use core::{any::TypeId, f32::consts::PI, ops::RangeInclusive}; use serde::{de::DeserializeSeed, Deserialize}; use serde::{de::IgnoredAny, Deserializer}; - use bevy_platform_support::collections::{HashMap, HashSet}; + use bevy_platform::collections::{HashMap, HashSet}; use crate::{ serde::{ @@ -470,10 +469,9 @@ mod tests { let deserializer = ReflectDeserializer::new(®istry); - let dynamic_output = bincode::DefaultOptions::new() - .with_fixint_encoding() - .deserialize_seed(deserializer, &input) - .unwrap(); + let config = bincode::config::standard().with_fixed_int_encoding(); + let (dynamic_output, _read_bytes) = + bincode::serde::seed_decode_from_slice(deserializer, &input, config).unwrap(); let output = ::from_reflect(dynamic_output.as_ref()).unwrap(); assert_eq!(expected, output); diff --git a/crates/bevy_reflect/src/serde/mod.rs b/crates/bevy_reflect/src/serde/mod.rs index c31d974566..a2c3fe63ed 100644 --- a/crates/bevy_reflect/src/serde/mod.rs +++ b/crates/bevy_reflect/src/serde/mod.rs @@ -164,7 +164,7 @@ mod tests { let mut registry = TypeRegistry::default(); registry.register::(); - let value: DynamicStruct = TestStruct { a: 123, b: 456 }.clone_dynamic(); + let value: DynamicStruct = TestStruct { a: 123, b: 456 }.to_dynamic_struct(); let serializer = ReflectSerializer::new(&value, ®istry); @@ -175,7 +175,7 @@ mod tests { let mut deserializer = ron::de::Deserializer::from_str(&result).unwrap(); let reflect_deserializer = ReflectDeserializer::new(®istry); - let expected = value.clone_value(); + let expected = value.to_dynamic(); let result = reflect_deserializer.deserialize(&mut deserializer).unwrap(); assert!(expected @@ -190,7 +190,7 @@ mod tests { use crate::serde::{ReflectSerializeWithRegistry, SerializeWithRegistry}; use crate::{ReflectFromReflect, TypePath}; use alloc::{format, string::String, vec, vec::Vec}; - use bevy_platform_support::sync::Arc; + use bevy_platform::sync::Arc; use bevy_reflect_derive::reflect_trait; use core::any::TypeId; use core::fmt::{Debug, Formatter}; @@ -340,7 +340,7 @@ mod tests { fn create_arc_dyn_enemy(enemy: T) -> Arc { let arc = Arc::new(enemy); - #[cfg(feature = "portable-atomic")] + #[cfg(not(target_has_atomic = "ptr"))] #[expect( unsafe_code, reason = "unsized coercion is an unstable feature for non-std types" diff --git a/crates/bevy_reflect/src/serde/ser/mod.rs b/crates/bevy_reflect/src/serde/ser/mod.rs index 6cfebc747f..25399e1d71 100644 --- a/crates/bevy_reflect/src/serde/ser/mod.rs +++ b/crates/bevy_reflect/src/serde/ser/mod.rs @@ -24,13 +24,14 @@ mod tests { serde::{ReflectSerializer, ReflectSerializerProcessor}, PartialReflect, Reflect, ReflectSerialize, Struct, TypeRegistry, }; + #[cfg(feature = "functions")] + use alloc::boxed::Box; use 
alloc::{ - boxed::Box, string::{String, ToString}, vec, vec::Vec, }; - use bevy_platform_support::collections::{HashMap, HashSet}; + use bevy_platform::collections::{HashMap, HashSet}; use core::{any::TypeId, f32::consts::PI, ops::RangeInclusive}; use ron::{extensions::Extensions, ser::PrettyConfig}; use serde::{Serialize, Serializer}; @@ -348,7 +349,8 @@ mod tests { let registry = get_registry(); let serializer = ReflectSerializer::new(&input, ®istry); - let bytes = bincode::serialize(&serializer).unwrap(); + let config = bincode::config::standard().with_fixed_int_encoding(); + let bytes = bincode::serde::encode_to_vec(&serializer, config).unwrap(); let expected: Vec = vec![ 1, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 0, 0, 0, 0, 98, 101, 118, 121, 95, 114, 101, 102, @@ -406,7 +408,7 @@ mod tests { some: Some(SomeStruct { foo: 999999999 }), none: None, }; - let dynamic = value.clone_dynamic(); + let dynamic = value.to_dynamic_struct(); let reflect = dynamic.as_partial_reflect(); let registry = get_registry(); diff --git a/crates/bevy_reflect/src/serde/type_data.rs b/crates/bevy_reflect/src/serde/type_data.rs index 8df80c700f..9bb3e134ac 100644 --- a/crates/bevy_reflect/src/serde/type_data.rs +++ b/crates/bevy_reflect/src/serde/type_data.rs @@ -1,6 +1,6 @@ use crate::Reflect; use alloc::boxed::Box; -use bevy_platform_support::collections::{hash_map::Iter, HashMap}; +use bevy_platform::collections::{hash_map::Iter, HashMap}; /// Contains data relevant to the automatic reflect powered (de)serialization of a type. #[derive(Debug, Clone)] @@ -14,9 +14,9 @@ impl SerializationData { /// # Arguments /// /// * `skipped_iter`: The iterator of field indices to be skipped during (de)serialization. - /// Indices are assigned only to reflected fields. - /// Ignored fields (i.e. those marked `#[reflect(ignore)]`) are implicitly skipped - /// and do not need to be included in this iterator. + /// Indices are assigned only to reflected fields. + /// Ignored fields (i.e. those marked `#[reflect(ignore)]`) are implicitly skipped + /// and do not need to be included in this iterator. pub fn new>(skipped_iter: I) -> Self { Self { skipped_fields: skipped_iter.collect(), diff --git a/crates/bevy_reflect/src/set.rs b/crates/bevy_reflect/src/set.rs index 52b4e8b9a1..753662b603 100644 --- a/crates/bevy_reflect/src/set.rs +++ b/crates/bevy_reflect/src/set.rs @@ -1,9 +1,7 @@ use alloc::{boxed::Box, format, vec::Vec}; use core::fmt::{Debug, Formatter}; -use bevy_platform_support::collections::{ - hash_table::OccupiedEntry as HashTableOccupiedEntry, HashTable, -}; +use bevy_platform::collections::{hash_table::OccupiedEntry as HashTableOccupiedEntry, HashTable}; use bevy_reflect_derive::impl_type_path; use crate::{ @@ -70,7 +68,20 @@ pub trait Set: PartialReflect { fn drain(&mut self) -> Vec>; /// Clones the set, producing a [`DynamicSet`]. - fn clone_dynamic(&self) -> DynamicSet; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_set` instead")] + fn clone_dynamic(&self) -> DynamicSet { + self.to_dynamic_set() + } + + /// Creates a new [`DynamicSet`] from this set. + fn to_dynamic_set(&self) -> DynamicSet { + let mut set = DynamicSet::default(); + set.set_represented_type(self.get_represented_type_info()); + for value in self.iter() { + set.insert_boxed(value.to_dynamic()); + } + set + } /// Inserts a value into the set. 
/// @@ -167,7 +178,7 @@ impl DynamicSet { } fn internal_hash(value: &dyn PartialReflect) -> u64 { - value.reflect_hash().expect(hash_error!(value)) + value.reflect_hash().expect(&hash_error!(value)) } fn internal_eq( @@ -201,23 +212,6 @@ impl Set for DynamicSet { self.hash_table.drain().collect::>() } - fn clone_dynamic(&self) -> DynamicSet { - let mut hash_table = HashTable::new(); - self.hash_table - .iter() - .map(|value| value.clone_value()) - .for_each(|value| { - hash_table.insert_unique(Self::internal_hash(value.as_ref()), value, |boxed| { - Self::internal_hash(boxed.as_ref()) - }); - }); - - DynamicSet { - represented_type: self.represented_type, - hash_table, - } - } - fn insert_boxed(&mut self, value: Box) -> bool { assert_eq!( value.reflect_partial_eq(&*value), @@ -317,10 +311,6 @@ impl PartialReflect for DynamicSet { ReflectOwned::Set(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { set_partial_eq(self, value) } @@ -377,7 +367,7 @@ impl FromIterator for DynamicSet { impl IntoIterator for DynamicSet { type Item = Box; - type IntoIter = bevy_platform_support::collections::hash_table::IntoIter; + type IntoIter = bevy_platform::collections::hash_table::IntoIter; fn into_iter(self) -> Self::IntoIter { self.hash_table.into_iter() @@ -387,7 +377,7 @@ impl IntoIterator for DynamicSet { impl<'a> IntoIterator for &'a DynamicSet { type Item = &'a dyn PartialReflect; type IntoIter = core::iter::Map< - bevy_platform_support::collections::hash_table::Iter<'a, Box>, + bevy_platform::collections::hash_table::Iter<'a, Box>, fn(&'a Box) -> Self::Item, >; @@ -467,7 +457,7 @@ pub fn set_apply(a: &mut M, b: &dyn PartialReflect) { if let ReflectRef::Set(set_value) = b.reflect_ref() { for b_value in set_value.iter() { if a.get(b_value).is_none() { - a.insert_boxed(b_value.clone_value()); + a.insert_boxed(b_value.to_dynamic()); } } } else { @@ -490,7 +480,7 @@ pub fn set_try_apply(a: &mut S, b: &dyn PartialReflect) -> Result<(), Ap for b_value in set_value.iter() { if a.get(b_value).is_none() { - a.insert_boxed(b_value.clone_value()); + a.insert_boxed(b_value.to_dynamic()); } } diff --git a/crates/bevy_reflect/src/struct_trait.rs b/crates/bevy_reflect/src/struct_trait.rs index dc7ed55cef..9146e9aece 100644 --- a/crates/bevy_reflect/src/struct_trait.rs +++ b/crates/bevy_reflect/src/struct_trait.rs @@ -6,8 +6,8 @@ use crate::{ ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, }; use alloc::{borrow::Cow, boxed::Box, vec::Vec}; -use bevy_platform_support::collections::HashMap; -use bevy_platform_support::sync::Arc; +use bevy_platform::collections::HashMap; +use bevy_platform::sync::Arc; use bevy_reflect_derive::impl_type_path; use core::{ fmt::{Debug, Formatter}, @@ -72,7 +72,19 @@ pub trait Struct: PartialReflect { fn iter_fields(&self) -> FieldIter; /// Clones the struct into a [`DynamicStruct`]. - fn clone_dynamic(&self) -> DynamicStruct; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_struct` instead")] + fn clone_dynamic(&self) -> DynamicStruct { + self.to_dynamic_struct() + } + + fn to_dynamic_struct(&self) -> DynamicStruct { + let mut dynamic_struct = DynamicStruct::default(); + dynamic_struct.set_represented_type(self.get_represented_type_info()); + for (i, value) in self.iter_fields().enumerate() { + dynamic_struct.insert_boxed(self.name_at(i).unwrap(), value.to_dynamic()); + } + dynamic_struct + } /// Will return `None` if [`TypeInfo`] is not available. 
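With `to_dynamic_struct` now a provided method built on `iter_fields`/`name_at`, any `Struct` implementor gets the dynamic conversion for free. A usage sketch assuming that default; the `Position` type is illustrative:

```rust
use bevy_reflect::{DynamicStruct, PartialReflect, Reflect, Struct};

#[derive(Reflect)]
struct Position {
    x: f32,
    y: f32,
}

let position = Position { x: 1.0, y: 2.0 };
// Provided by the default method above: copies each named field into a proxy.
let dynamic: DynamicStruct = position.to_dynamic_struct();
assert!(dynamic.represents::<Position>());
assert_eq!(
    dynamic.field("x").unwrap().try_downcast_ref::<f32>(),
    Some(&1.0)
);
```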
fn get_represented_struct_info(&self) -> Option<&'static StructInfo> { @@ -370,19 +382,6 @@ impl Struct for DynamicStruct { index: 0, } } - - fn clone_dynamic(&self) -> DynamicStruct { - DynamicStruct { - represented_type: self.get_represented_type_info(), - field_names: self.field_names.clone(), - field_indices: self.field_indices.clone(), - fields: self - .fields - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicStruct { @@ -449,11 +448,6 @@ impl PartialReflect for DynamicStruct { ReflectOwned::Struct(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { struct_partial_eq(self, value) } diff --git a/crates/bevy_reflect/src/tuple.rs b/crates/bevy_reflect/src/tuple.rs index 28f95b6886..31ad67fdcf 100644 --- a/crates/bevy_reflect/src/tuple.rs +++ b/crates/bevy_reflect/src/tuple.rs @@ -4,9 +4,9 @@ use variadics_please::all_tuples; use crate::generics::impl_generic_info_methods; use crate::{ type_info::impl_type_methods, utility::GenericTypePathCell, ApplyError, FromReflect, Generics, - GetTypeRegistration, MaybeTyped, PartialReflect, Reflect, ReflectKind, ReflectMut, - ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, TypeRegistration, TypeRegistry, Typed, - UnnamedField, + GetTypeRegistration, MaybeTyped, PartialReflect, Reflect, ReflectCloneError, ReflectKind, + ReflectMut, ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, TypeRegistration, TypeRegistry, + Typed, UnnamedField, }; use alloc::{boxed::Box, vec, vec::Vec}; use core::{ @@ -55,8 +55,19 @@ pub trait Tuple: PartialReflect { /// Drain the fields of this tuple to get a vector of owned values. fn drain(self: Box) -> Vec>; - /// Clones the struct into a [`DynamicTuple`]. - fn clone_dynamic(&self) -> DynamicTuple; + /// Clones the tuple into a [`DynamicTuple`]. + #[deprecated(since = "0.16.0", note = "use `to_dynamic_tuple` instead")] + fn clone_dynamic(&self) -> DynamicTuple { + self.to_dynamic_tuple() + } + + /// Creates a new [`DynamicTuple`] from this tuple. + fn to_dynamic_tuple(&self) -> DynamicTuple { + DynamicTuple { + represented_type: self.get_represented_type_info(), + fields: self.iter_fields().map(PartialReflect::to_dynamic).collect(), + } + } /// Will return `None` if [`TypeInfo`] is not available. fn get_represented_tuple_info(&self) -> Option<&'static TupleInfo> { @@ -270,18 +281,6 @@ impl Tuple for DynamicTuple { fn drain(self: Box) -> Vec> { self.fields } - - #[inline] - fn clone_dynamic(&self) -> DynamicTuple { - DynamicTuple { - represented_type: self.represented_type, - fields: self - .fields - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicTuple { @@ -339,11 +338,6 @@ impl PartialReflect for DynamicTuple { ReflectOwned::Tuple(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { tuple_try_apply(self, value) } @@ -518,18 +512,6 @@ macro_rules! impl_reflect_tuple { $(Box::new(self.$index),)* ] } - - #[inline] - fn clone_dynamic(&self) -> DynamicTuple { - let info = self.get_represented_type_info(); - DynamicTuple { - represented_type: info, - fields: self - .iter_fields() - .map(|value| value.clone_value()) - .collect(), - } - } } impl<$($name: Reflect + MaybeTyped + TypePath + GetTypeRegistration),*> PartialReflect for ($($name,)*) { @@ -578,10 +560,6 @@ macro_rules! 
impl_reflect_tuple { ReflectOwned::Tuple(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { crate::tuple_partial_eq(self, value) } @@ -593,6 +571,16 @@ macro_rules! impl_reflect_tuple { fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { crate::tuple_try_apply(self, value) } + + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(( + $( + self.$index.reflect_clone()? + .take::<$name>() + .expect("`Reflect::reflect_clone` should return the same type"), + )* + ))) + } } impl<$($name: Reflect + MaybeTyped + TypePath + GetTypeRegistration),*> Reflect for ($($name,)*) { diff --git a/crates/bevy_reflect/src/tuple_struct.rs b/crates/bevy_reflect/src/tuple_struct.rs index 8348ca7740..09d2819807 100644 --- a/crates/bevy_reflect/src/tuple_struct.rs +++ b/crates/bevy_reflect/src/tuple_struct.rs @@ -8,7 +8,7 @@ use crate::{ ReflectOwned, ReflectRef, Tuple, Type, TypeInfo, TypePath, UnnamedField, }; use alloc::{boxed::Box, vec::Vec}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use core::{ fmt::{Debug, Formatter}, slice::Iter, @@ -56,7 +56,18 @@ pub trait TupleStruct: PartialReflect { fn iter_fields(&self) -> TupleStructFieldIter; /// Clones the struct into a [`DynamicTupleStruct`]. - fn clone_dynamic(&self) -> DynamicTupleStruct; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_tuple_struct` instead")] + fn clone_dynamic(&self) -> DynamicTupleStruct { + self.to_dynamic_tuple_struct() + } + + /// Creates a new [`DynamicTupleStruct`] from this tuple struct. + fn to_dynamic_tuple_struct(&self) -> DynamicTupleStruct { + DynamicTupleStruct { + represented_type: self.get_represented_type_info(), + fields: self.iter_fields().map(PartialReflect::to_dynamic).collect(), + } + } /// Will return `None` if [`TypeInfo`] is not available. 
fn get_represented_tuple_struct_info(&self) -> Option<&'static TupleStructInfo> { @@ -279,17 +290,6 @@ impl TupleStruct for DynamicTupleStruct { index: 0, } } - - fn clone_dynamic(&self) -> DynamicTupleStruct { - DynamicTupleStruct { - represented_type: self.represented_type, - fields: self - .fields - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicTupleStruct { @@ -357,11 +357,6 @@ impl PartialReflect for DynamicTupleStruct { ReflectOwned::TupleStruct(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { tuple_struct_partial_eq(self, value) diff --git a/crates/bevy_reflect/src/type_info.rs b/crates/bevy_reflect/src/type_info.rs index 2add261aa2..1a3be15c36 100644 --- a/crates/bevy_reflect/src/type_info.rs +++ b/crates/bevy_reflect/src/type_info.rs @@ -72,7 +72,6 @@ use thiserror::Error; /// # fn reflect_ref(&self) -> ReflectRef { todo!() } /// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } /// # fn reflect_owned(self: Box) -> ReflectOwned { todo!() } -/// # fn clone_value(&self) -> Box { todo!() } /// # } /// # impl Reflect for MyStruct { /// # fn into_any(self: Box) -> Box { todo!() } diff --git a/crates/bevy_reflect/src/type_registry.rs b/crates/bevy_reflect/src/type_registry.rs index d9d0e509bf..5827ebdac5 100644 --- a/crates/bevy_reflect/src/type_registry.rs +++ b/crates/bevy_reflect/src/type_registry.rs @@ -1,6 +1,6 @@ use crate::{serde::Serializable, FromReflect, Reflect, TypeInfo, TypePath, Typed}; use alloc::{boxed::Box, string::String}; -use bevy_platform_support::{ +use bevy_platform::{ collections::{HashMap, HashSet}, sync::{Arc, PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard}, }; @@ -165,6 +165,43 @@ impl TypeRegistry { } } + /// Attempts to register the referenced type `T` if it has not yet been registered. + /// + /// See [`register`] for more details. + /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::{Reflect, TypeRegistry}; + /// # use core::any::TypeId; + /// # + /// # let mut type_registry = TypeRegistry::default(); + /// # + /// #[derive(Reflect)] + /// struct Foo { + /// bar: Bar, + /// } + /// + /// #[derive(Reflect)] + /// struct Bar; + /// + /// let foo = Foo { bar: Bar }; + /// + /// // Equivalent to `type_registry.register::()` + /// type_registry.register_by_val(&foo); + /// + /// assert!(type_registry.contains(TypeId::of::())); + /// assert!(type_registry.contains(TypeId::of::())); + /// ``` + /// + /// [`register`]: Self::register + pub fn register_by_val(&mut self, _: &T) + where + T: GetTypeRegistration, + { + self.register::(); + } + /// Attempts to register the type described by `registration`. /// /// If the registration for the type already exists, it will not be registered again. 
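The new `register_by_val` infers the registered type from a reference, which is convenient inside generic helpers where spelling out `T` at the call site is awkward. A sketch under that assumption; the `track` helper and `Velocity` type are illustrative:

```rust
use bevy_reflect::{GetTypeRegistration, Reflect, TypeRegistry};
use core::any::TypeId;

// Illustrative helper: register whatever concrete type a caller hands us.
fn track<T: GetTypeRegistration>(registry: &mut TypeRegistry, value: &T) {
    registry.register_by_val(value);
}

#[derive(Reflect)]
struct Velocity(f32);

let mut registry = TypeRegistry::default();
track(&mut registry, &Velocity(1.0));
assert!(registry.contains(TypeId::of::<Velocity>()));
```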
@@ -213,7 +250,7 @@ impl TypeRegistry { type_id: TypeId, get_registration: impl FnOnce() -> TypeRegistration, ) -> bool { - use bevy_platform_support::collections::hash_map::Entry; + use bevy_platform::collections::hash_map::Entry; match self.registrations.entry(type_id) { Entry::Occupied(_) => false, diff --git a/crates/bevy_reflect/src/utility.rs b/crates/bevy_reflect/src/utility.rs index aa340c6363..5735a29dbe 100644 --- a/crates/bevy_reflect/src/utility.rs +++ b/crates/bevy_reflect/src/utility.rs @@ -2,7 +2,7 @@ use crate::TypeInfo; use alloc::boxed::Box; -use bevy_platform_support::{ +use bevy_platform::{ hash::{DefaultHasher, FixedHasher, NoOpHash}, sync::{OnceLock, PoisonError, RwLock}, }; @@ -88,7 +88,6 @@ mod sealed { /// # fn reflect_ref(&self) -> ReflectRef { todo!() } /// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } /// # fn reflect_owned(self: Box) -> ReflectOwned { todo!() } -/// # fn clone_value(&self) -> Box { todo!() } /// # } /// # impl Reflect for Foo { /// # fn into_any(self: Box) -> Box { todo!() } @@ -176,7 +175,6 @@ impl Default for NonGenericTypeCell { /// # fn reflect_ref(&self) -> ReflectRef { todo!() } /// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } /// # fn reflect_owned(self: Box) -> ReflectOwned { todo!() } -/// # fn clone_value(&self) -> Box { todo!() } /// # } /// # impl Reflect for Foo { /// # fn into_any(self: Box) -> Box { todo!() } diff --git a/crates/bevy_remote/Cargo.toml b/crates/bevy_remote/Cargo.toml index 9d2f6f25f8..d2e3395f77 100644 --- a/crates/bevy_remote/Cargo.toml +++ b/crates/bevy_remote/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_remote" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "The Bevy Remote Protocol" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -22,7 +22,7 @@ bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", features = [ bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", "serialize", ] } diff --git a/crates/bevy_remote/src/builtin_methods.rs b/crates/bevy_remote/src/builtin_methods.rs index 80dc592d9a..ce5fa259a1 100644 --- a/crates/bevy_remote/src/builtin_methods.rs +++ b/crates/bevy_remote/src/builtin_methods.rs @@ -14,17 +14,22 @@ use bevy_ecs::{ system::{In, Local}, world::{EntityRef, EntityWorldMut, FilteredEntityRef, World}, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::{ - prelude::ReflectDefault, serde::{ReflectSerializer, TypedReflectDeserializer}, - GetPath as _, NamedField, OpaqueInfo, PartialReflect, ReflectDeserialize, ReflectSerialize, - TypeInfo, TypeRegistration, TypeRegistry, VariantInfo, + GetPath, PartialReflect, TypeRegistration, TypeRegistry, }; use serde::{de::DeserializeSeed as _, Deserialize, Serialize}; -use serde_json::{json, Map, Value}; +use serde_json::{Map, Value}; -use crate::{error_codes, BrpError, BrpResult}; +use crate::{ + error_codes, + schemas::{json_schema::JsonSchemaBevyType, open_rpc::OpenRpcDocument}, + BrpError, BrpResult, +}; + +#[cfg(all(feature = "http", not(target_family = "wasm")))] +use {crate::schemas::open_rpc::ServerObject, 
bevy_utils::default}; /// The method path for a `bevy/get` request. pub const BRP_GET_METHOD: &str = "bevy/get"; @@ -59,9 +64,27 @@ pub const BRP_GET_AND_WATCH_METHOD: &str = "bevy/get+watch"; /// The method path for a `bevy/list+watch` request. pub const BRP_LIST_AND_WATCH_METHOD: &str = "bevy/list+watch"; +/// The method path for a `bevy/get_resource` request. +pub const BRP_GET_RESOURCE_METHOD: &str = "bevy/get_resource"; + +/// The method path for a `bevy/insert_resource` request. +pub const BRP_INSERT_RESOURCE_METHOD: &str = "bevy/insert_resource"; + +/// The method path for a `bevy/remove_resource` request. +pub const BRP_REMOVE_RESOURCE_METHOD: &str = "bevy/remove_resource"; + +/// The method path for a `bevy/mutate_resource` request. +pub const BRP_MUTATE_RESOURCE_METHOD: &str = "bevy/mutate_resource"; + +/// The method path for a `bevy/list_resources` request. +pub const BRP_LIST_RESOURCES_METHOD: &str = "bevy/list_resources"; + /// The method path for a `bevy/registry/schema` request. pub const BRP_REGISTRY_SCHEMA_METHOD: &str = "bevy/registry/schema"; +/// The method path for a `rpc.discover` request. +pub const RPC_DISCOVER_METHOD: &str = "rpc.discover"; + /// `bevy/get`: Retrieves one or more components from the entity with the given /// ID. /// @@ -87,6 +110,15 @@ pub struct BrpGetParams { pub strict: bool, } +/// `bevy/get_resource`: Retrieves the value of a given resource. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpGetResourceParams { + /// The [full path] of the resource type being requested. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, +} + /// `bevy/query`: Performs a query over components in the ECS, returning entities /// and component values that match. /// @@ -153,6 +185,15 @@ pub struct BrpRemoveParams { pub components: Vec, } +/// `bevy/remove_resource`: Removes the given resource from the world. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpRemoveResourceParams { + /// The [full path] of the resource type to remove. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, +} + /// `bevy/insert`: Adds one or more components to an entity. /// /// The server responds with a null. @@ -173,6 +214,19 @@ pub struct BrpInsertParams { pub components: HashMap, } +/// `bevy/insert_resource`: Inserts a resource into the world with a given +/// value. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpInsertResourceParams { + /// The [full path] of the resource type to insert. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, + + /// The serialized value of the resource to be inserted. + pub value: Value, +} + /// `bevy/reparent`: Assign a new parent to one or more entities. /// /// The server responds with a null. @@ -204,7 +258,7 @@ pub struct BrpListParams { /// /// The server responds with a null. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct BrpMutateParams { +pub struct BrpMutateComponentParams { /// The entity of the component to mutate. pub entity: Entity, @@ -222,6 +276,25 @@ pub struct BrpMutateParams { pub value: Value, } +/// `bevy/mutate_resource`: +/// +/// The server responds with a null. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpMutateResourceParams { + /// The [full path] of the resource to mutate. 
+ /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, + + /// The [path] of the field within the resource. + /// + /// [path]: bevy_reflect::GetPath + pub path: String, + + /// The value to insert at `path`. + pub value: Value, +} + /// Describes the data that is to be fetched in a query. #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] pub struct BrpQuery { @@ -323,6 +396,13 @@ pub enum BrpGetResponse { Strict(HashMap), } +/// The response to a `bevy/get_resource` request. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpGetResourceResponse { + /// The value of the requested resource. + pub value: Value, +} + /// A single response from a `bevy/get+watch` request. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(untagged)] @@ -351,6 +431,9 @@ pub enum BrpGetWatchingResponse { /// The response to a `bevy/list` request. pub type BrpListResponse = Vec; +/// The response to a `bevy/list_resources` request. +pub type BrpListResourcesResponse = Vec; + /// A single response from a `bevy/list+watch` request. #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)] pub struct BrpListWatchingResponse { @@ -413,6 +496,45 @@ pub fn process_remote_get_request(In(params): In>, world: &World) serde_json::to_value(response).map_err(BrpError::internal) } +/// Handles a `bevy/get_resource` request coming from a client. +pub fn process_remote_get_resource_request( + In(params): In>, + world: &World, +) -> BrpResult { + let BrpGetResourceParams { + resource: resource_path, + } = parse_some(params)?; + + let app_type_registry = world.resource::(); + let type_registry = app_type_registry.read(); + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + + let Ok(reflected) = reflect_resource.reflect(world) else { + return Err(BrpError::resource_not_present(&resource_path)); + }; + + // Use the `ReflectSerializer` to serialize the value of the resource; + // this produces a map with a single item. + let reflect_serializer = ReflectSerializer::new(reflected.as_partial_reflect(), &type_registry); + let Value::Object(serialized_object) = + serde_json::to_value(&reflect_serializer).map_err(BrpError::resource_error)? + else { + return Err(BrpError { + code: error_codes::RESOURCE_ERROR, + message: format!("Resource `{}` could not be serialized", resource_path), + data: None, + }); + }; + + // Get the single value out of the map. + let value = serialized_object.into_values().next().ok_or_else(|| { + BrpError::internal(anyhow!("Unexpected format of serialized resource value")) + })?; + let response = BrpGetResourceResponse { value }; + serde_json::to_value(response).map_err(BrpError::internal) +} + /// Handles a `bevy/get+watch` request coming from a client. pub fn process_remote_get_watching_request( In(params): In>, @@ -569,11 +691,7 @@ fn reflect_component( // Each component value serializes to a map with a single entry. let reflect_serializer = ReflectSerializer::new(reflected.as_partial_reflect(), type_registry); let Value::Object(serialized_object) = - serde_json::to_value(&reflect_serializer).map_err(|err| BrpError { - code: error_codes::COMPONENT_ERROR, - message: err.to_string(), - data: None, - })? + serde_json::to_value(&reflect_serializer).map_err(BrpError::component_error)? 
else { return Err(BrpError { code: error_codes::COMPONENT_ERROR, @@ -696,6 +814,44 @@ pub fn process_remote_spawn_request(In(params): In>, world: &mut W serde_json::to_value(response).map_err(BrpError::internal) } +/// Handles a `rpc.discover` request coming from a client. +pub fn process_remote_list_methods_request( + In(_params): In>, + world: &mut World, +) -> BrpResult { + let remote_methods = world.resource::(); + + #[cfg(all(feature = "http", not(target_family = "wasm")))] + let servers = match ( + world.get_resource::(), + world.get_resource::(), + ) { + (Some(url), Some(port)) => Some(vec![ServerObject { + name: "Server".to_owned(), + url: format!("{}:{}", url.0, port.0), + ..default() + }]), + (Some(url), None) => Some(vec![ServerObject { + name: "Server".to_owned(), + url: url.0.to_string(), + ..default() + }]), + _ => None, + }; + + #[cfg(any(not(feature = "http"), target_family = "wasm"))] + let servers = None; + + let doc = OpenRpcDocument { + info: Default::default(), + methods: remote_methods.into(), + openrpc: "1.3.2".to_owned(), + servers, + }; + + serde_json::to_value(doc).map_err(BrpError::internal) +} + /// Handles a `bevy/insert` request (insert components) coming from a client. pub fn process_remote_insert_request( In(params): In>, @@ -719,6 +875,29 @@ pub fn process_remote_insert_request( Ok(Value::Null) } +/// Handles a `bevy/insert_resource` request coming from a client. +pub fn process_remote_insert_resource_request( + In(params): In>, + world: &mut World, +) -> BrpResult { + let BrpInsertResourceParams { + resource: resource_path, + value, + } = parse_some(params)?; + + let app_type_registry = world.resource::().clone(); + let type_registry = app_type_registry.read(); + + let reflected_resource = deserialize_resource(&type_registry, &resource_path, value) + .map_err(BrpError::resource_error)?; + + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + reflect_resource.insert(world, &*reflected_resource, &type_registry); + + Ok(Value::Null) +} + /// Handles a `bevy/mutate_component` request coming from a client. /// /// This method allows you to mutate a single field inside an Entity's @@ -727,7 +906,7 @@ pub fn process_remote_mutate_component_request( In(params): In>, world: &mut World, ) -> BrpResult { - let BrpMutateParams { + let BrpMutateComponentParams { entity, component, path, @@ -747,7 +926,7 @@ pub fn process_remote_mutate_component_request( let mut reflected = component_type .data::() .ok_or_else(|| { - BrpError::component_error(anyhow!("Component `{}` isn't registered.", component)) + BrpError::component_error(anyhow!("Component `{}` isn't registered", component)) })? .reflect_mut(world.entity_mut(entity)) .ok_or_else(|| { @@ -783,6 +962,57 @@ pub fn process_remote_mutate_component_request( Ok(Value::Null) } +/// Handles a `bevy/mutate_resource` request coming from a client. +pub fn process_remote_mutate_resource_request( + In(params): In>, + world: &mut World, +) -> BrpResult { + let BrpMutateResourceParams { + resource: resource_path, + path: field_path, + value, + } = parse_some(params)?; + + let app_type_registry = world.resource::().clone(); + let type_registry = app_type_registry.read(); + + // Get the `ReflectResource` for the given resource path. + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + + // Get the actual resource value from the world as a `dyn Reflect`. 
+ let mut reflected_resource = reflect_resource + .reflect_mut(world) + .map_err(|_| BrpError::resource_not_present(&resource_path))?; + + // Get the type registration for the field with the given path. + let value_registration = type_registry + .get_with_type_path( + reflected_resource + .reflect_path(field_path.as_str()) + .map_err(BrpError::resource_error)? + .reflect_type_path(), + ) + .ok_or_else(|| { + BrpError::resource_error(anyhow!("Unknown resource field type: `{}`", resource_path)) + })?; + + // Use the field's type registration to deserialize the given value. + let deserialized_value: Box = + TypedReflectDeserializer::new(value_registration, &type_registry) + .deserialize(&value) + .map_err(BrpError::resource_error)?; + + // Apply the value to the resource. + reflected_resource + .reflect_path_mut(field_path.as_str()) + .map_err(BrpError::resource_error)? + .try_apply(&*deserialized_value) + .map_err(BrpError::resource_error)?; + + Ok(Value::Null) +} + /// Handles a `bevy/remove` request (remove components) coming from a client. pub fn process_remote_remove_request( In(params): In>, @@ -805,6 +1035,25 @@ pub fn process_remote_remove_request( Ok(Value::Null) } +/// Handles a `bevy/remove_resource` request coming from a client. +pub fn process_remote_remove_resource_request( + In(params): In>, + world: &mut World, +) -> BrpResult { + let BrpRemoveResourceParams { + resource: resource_path, + } = parse_some(params)?; + + let app_type_registry = world.resource::().clone(); + let type_registry = app_type_registry.read(); + + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + reflect_resource.remove(world); + + Ok(Value::Null) +} + /// Handles a `bevy/destroy` (despawn entity) request coming from a client. pub fn process_remote_destroy_request( In(params): In>, @@ -881,7 +1130,28 @@ pub fn process_remote_list_request(In(params): In>, world: &World) serde_json::to_value(response).map_err(BrpError::internal) } -/// Handles a `bevy/list` request (list all components) coming from a client. +/// Handles a `bevy/list_resources` request coming from a client. +pub fn process_remote_list_resources_request( + In(_params): In>, + world: &World, +) -> BrpResult { + let mut response = BrpListResourcesResponse::default(); + + let app_type_registry = world.resource::(); + let type_registry = app_type_registry.read(); + + for registered_type in type_registry.iter() { + if registered_type.data::().is_some() { + response.push(registered_type.type_info().type_path().to_owned()); + } + } + + response.sort(); + + serde_json::to_value(response).map_err(BrpError::internal) +} + +/// Handles a `bevy/list+watch` request coming from a client. 
pub fn process_remote_list_watching_request( In(params): In>, world: &World, @@ -938,7 +1208,7 @@ pub fn export_registry_types(In(params): In>, world: &World) -> Br let types = types.read(); let schemas = types .iter() - .map(export_type) + .map(crate::schemas::json_schema::export_type) .filter(|(_, schema)| { if let Some(crate_name) = &schema.crate_name { if !filter.with_crates.is_empty() @@ -978,338 +1248,6 @@ pub fn export_registry_types(In(params): In>, world: &World) -> Br serde_json::to_value(schemas).map_err(BrpError::internal) } -/// Exports schema info for a given type -fn export_type(reg: &TypeRegistration) -> (String, JsonSchemaBevyType) { - let t = reg.type_info(); - let binding = t.type_path_table(); - - let short_path = binding.short_path(); - let type_path = binding.path(); - let mut typed_schema = JsonSchemaBevyType { - reflect_types: get_registered_reflect_types(reg), - short_path: short_path.to_owned(), - type_path: type_path.to_owned(), - crate_name: binding.crate_name().map(str::to_owned), - module_path: binding.module_path().map(str::to_owned), - ..Default::default() - }; - match t { - TypeInfo::Struct(info) => { - typed_schema.properties = info - .iter() - .map(|field| (field.name().to_owned(), field.ty().ref_type())) - .collect::>(); - typed_schema.required = info - .iter() - .filter(|field| !field.type_path().starts_with("core::option::Option")) - .map(|f| f.name().to_owned()) - .collect::>(); - typed_schema.additional_properties = Some(false); - typed_schema.schema_type = SchemaType::Object; - typed_schema.kind = SchemaKind::Struct; - } - TypeInfo::Enum(info) => { - typed_schema.kind = SchemaKind::Enum; - - let simple = info - .iter() - .all(|variant| matches!(variant, VariantInfo::Unit(_))); - if simple { - typed_schema.schema_type = SchemaType::String; - typed_schema.one_of = info - .iter() - .map(|variant| match variant { - VariantInfo::Unit(v) => v.name().into(), - _ => unreachable!(), - }) - .collect::>(); - } else { - typed_schema.schema_type = SchemaType::Object; - typed_schema.one_of = info - .iter() - .map(|variant| match variant { - VariantInfo::Struct(v) => json!({ - "type": "object", - "kind": "Struct", - "typePath": format!("{}::{}", type_path, v.name()), - "shortPath": v.name(), - "properties": v - .iter() - .map(|field| (field.name().to_owned(), field.ref_type())) - .collect::>(), - "additionalProperties": false, - "required": v - .iter() - .filter(|field| !field.type_path().starts_with("core::option::Option")) - .map(NamedField::name) - .collect::>(), - }), - VariantInfo::Tuple(v) => json!({ - "type": "array", - "kind": "Tuple", - "typePath": format!("{}::{}", type_path, v.name()), - "shortPath": v.name(), - "prefixItems": v - .iter() - .map(SchemaJsonReference::ref_type) - .collect::>(), - "items": false, - }), - VariantInfo::Unit(v) => json!({ - "typePath": format!("{}::{}", type_path, v.name()), - "shortPath": v.name(), - }), - }) - .collect::>(); - } - } - TypeInfo::TupleStruct(info) => { - typed_schema.schema_type = SchemaType::Array; - typed_schema.kind = SchemaKind::TupleStruct; - typed_schema.prefix_items = info - .iter() - .map(SchemaJsonReference::ref_type) - .collect::>(); - typed_schema.items = Some(false.into()); - } - TypeInfo::List(info) => { - typed_schema.schema_type = SchemaType::Array; - typed_schema.kind = SchemaKind::List; - typed_schema.items = info.item_ty().ref_type().into(); - } - TypeInfo::Array(info) => { - typed_schema.schema_type = SchemaType::Array; - typed_schema.kind = SchemaKind::Array; - typed_schema.items = 
info.item_ty().ref_type().into(); - } - TypeInfo::Map(info) => { - typed_schema.schema_type = SchemaType::Object; - typed_schema.kind = SchemaKind::Map; - typed_schema.key_type = info.key_ty().ref_type().into(); - typed_schema.value_type = info.value_ty().ref_type().into(); - } - TypeInfo::Tuple(info) => { - typed_schema.schema_type = SchemaType::Array; - typed_schema.kind = SchemaKind::Tuple; - typed_schema.prefix_items = info - .iter() - .map(SchemaJsonReference::ref_type) - .collect::>(); - typed_schema.items = Some(false.into()); - } - TypeInfo::Set(info) => { - typed_schema.schema_type = SchemaType::Set; - typed_schema.kind = SchemaKind::Set; - typed_schema.items = info.value_ty().ref_type().into(); - } - TypeInfo::Opaque(info) => { - typed_schema.schema_type = info.map_json_type(); - typed_schema.kind = SchemaKind::Value; - } - }; - - (t.type_path().to_owned(), typed_schema) -} - -fn get_registered_reflect_types(reg: &TypeRegistration) -> Vec { - // Vec could be moved to allow registering more types by game maker. - let registered_reflect_types: [(TypeId, &str); 5] = [ - { (TypeId::of::(), "Component") }, - { (TypeId::of::(), "Resource") }, - { (TypeId::of::(), "Default") }, - { (TypeId::of::(), "Serialize") }, - { (TypeId::of::(), "Deserialize") }, - ]; - let mut result = Vec::new(); - for (id, name) in registered_reflect_types { - if reg.data_by_id(id).is_some() { - result.push(name.to_owned()); - } - } - result -} - -/// JSON Schema type for Bevy Registry Types -/// It tries to follow this standard: -/// -/// To take the full advantage from info provided by Bevy registry it provides extra fields -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] -#[serde(rename_all = "camelCase")] -pub struct JsonSchemaBevyType { - /// Bevy specific field, short path of the type. - pub short_path: String, - /// Bevy specific field, full path of the type. - pub type_path: String, - /// Bevy specific field, path of the module that type is part of. - #[serde(skip_serializing_if = "Option::is_none", default)] - pub module_path: Option, - /// Bevy specific field, name of the crate that type is part of. - #[serde(skip_serializing_if = "Option::is_none", default)] - pub crate_name: Option, - /// Bevy specific field, names of the types that type reflects. - #[serde(skip_serializing_if = "Vec::is_empty", default)] - pub reflect_types: Vec, - /// Bevy specific field, [`TypeInfo`] type mapping. - pub kind: SchemaKind, - /// Bevy specific field, provided when [`SchemaKind`] `kind` field is equal to [`SchemaKind::Map`]. - /// - /// It contains type info of key of the Map. - #[serde(skip_serializing_if = "Option::is_none", default)] - pub key_type: Option, - /// Bevy specific field, provided when [`SchemaKind`] `kind` field is equal to [`SchemaKind::Map`]. - /// - /// It contains type info of value of the Map. - #[serde(skip_serializing_if = "Option::is_none", default)] - pub value_type: Option, - /// The type keyword is fundamental to JSON Schema. It specifies the data type for a schema. - #[serde(rename = "type")] - pub schema_type: SchemaType, - /// The behavior of this keyword depends on the presence and annotation results of "properties" - /// and "patternProperties" within the same schema object. - /// Validation with "additionalProperties" applies only to the child - /// values of instance names that do not appear in the annotation results of either "properties" or "patternProperties". 
- #[serde(skip_serializing_if = "Option::is_none", default)] - pub additional_properties: Option, - /// Validation succeeds if, for each name that appears in both the instance and as a name - /// within this keyword's value, the child instance for that name successfully validates - /// against the corresponding schema. - #[serde(skip_serializing_if = "HashMap::is_empty", default)] - pub properties: HashMap, - /// An object instance is valid against this keyword if every item in the array is the name of a property in the instance. - #[serde(skip_serializing_if = "Vec::is_empty", default)] - pub required: Vec, - /// An instance validates successfully against this keyword if it validates successfully against exactly one schema defined by this keyword's value. - #[serde(skip_serializing_if = "Vec::is_empty", default)] - pub one_of: Vec, - /// Validation succeeds if each element of the instance validates against the schema at the same position, if any. This keyword does not constrain the length of the array. If the array is longer than this keyword's value, this keyword validates only the prefix of matching length. - /// - /// This keyword produces an annotation value which is the largest index to which this keyword - /// applied a subschema. The value MAY be a boolean true if a subschema was applied to every - /// index of the instance, such as is produced by the "items" keyword. - /// This annotation affects the behavior of "items" and "unevaluatedItems". - #[serde(skip_serializing_if = "Vec::is_empty", default)] - pub prefix_items: Vec, - /// This keyword applies its subschema to all instance elements at indexes greater - /// than the length of the "prefixItems" array in the same schema object, - /// as reported by the annotation result of that "prefixItems" keyword. - /// If no such annotation result exists, "items" applies its subschema to all - /// instance array elements. - /// - /// If the "items" subschema is applied to any positions within the instance array, - /// it produces an annotation result of boolean true, indicating that all remaining - /// array elements have been evaluated against this keyword's subschema. - #[serde(skip_serializing_if = "Option::is_none", default)] - pub items: Option, -} - -/// Kind of json schema, maps [`TypeInfo`] type -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] -pub enum SchemaKind { - /// Struct - #[default] - Struct, - /// Enum type - Enum, - /// A key-value map - Map, - /// Array - Array, - /// List - List, - /// Fixed size collection of items - Tuple, - /// Fixed size collection of items with named fields - TupleStruct, - /// Set of unique values - Set, - /// Single value, eg. primitive types - Value, -} - -/// Type of json schema -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] -#[serde(rename_all = "lowercase")] -pub enum SchemaType { - /// Represents a string value. - String, - /// Represents a floating-point number. - Float, - - /// Represents an unsigned integer. - Uint, - - /// Represents a signed integer. - Int, - - /// Represents an object with key-value pairs. - Object, - - /// Represents an array of values. - Array, - - /// Represents a boolean value (true or false). - Boolean, - - /// Represents a set of unique values. - Set, - - /// Represents a null value. - #[default] - Null, -} - -/// Helper trait for generating json schema reference -trait SchemaJsonReference { - /// Reference to another type in schema. - /// The value `$ref` is a URI-reference that is resolved against the schema. 
- fn ref_type(self) -> Value; -} - -/// Helper trait for mapping bevy type path into json schema type -trait SchemaJsonType { - /// Bevy Reflect type path - fn get_type_path(&self) -> &'static str; - - /// JSON Schema type keyword from Bevy reflect type path into - fn map_json_type(&self) -> SchemaType { - match self.get_type_path() { - "bool" => SchemaType::Boolean, - "u8" | "u16" | "u32" | "u64" | "u128" | "usize" => SchemaType::Uint, - "i8" | "i16" | "i32" | "i64" | "i128" | "isize" => SchemaType::Int, - "f32" | "f64" => SchemaType::Float, - "char" | "str" | "alloc::string::String" => SchemaType::String, - _ => SchemaType::Object, - } - } -} - -impl SchemaJsonType for OpaqueInfo { - fn get_type_path(&self) -> &'static str { - self.type_path() - } -} - -impl SchemaJsonReference for &bevy_reflect::Type { - fn ref_type(self) -> Value { - let path = self.path(); - json!({"type": json!({ "$ref": format!("#/$defs/{path}") })}) - } -} - -impl SchemaJsonReference for &bevy_reflect::UnnamedField { - fn ref_type(self) -> Value { - let path = self.type_path(); - json!({"type": json!({ "$ref": format!("#/$defs/{path}") })}) - } -} - -impl SchemaJsonReference for &NamedField { - fn ref_type(self) -> Value { - let type_path = self.type_path(); - json!({"type": json!({ "$ref": format!("#/$defs/{type_path}") }), "typePath": self.name()}) - } -} - /// Immutably retrieves an entity from the [`World`], returning an error if the /// entity isn't present. fn get_entity(world: &World, entity: Entity) -> Result, BrpError> { @@ -1451,6 +1389,23 @@ fn deserialize_components( Ok(reflect_components) } +/// Given a resource path and an associated serialized value (`value`), return the +/// deserialized value. +fn deserialize_resource( + type_registry: &TypeRegistry, + resource_path: &str, + value: Value, +) -> AnyhowResult> { + let Some(resource_type) = type_registry.get_with_type_path(resource_path) else { + return Err(anyhow!("Unknown resource type: `{}`", resource_path)); + }; + let reflected: Box = + TypedReflectDeserializer::new(resource_type, type_registry) + .deserialize(&value) + .map_err(|err| anyhow!("{resource_path} is invalid: {err}"))?; + Ok(reflected) +} + /// Given a collection `reflect_components` of reflected component values, insert them into /// the given entity (`entity_world_mut`). fn insert_reflected_components( @@ -1491,6 +1446,30 @@ fn get_component_type_registration<'r>( .ok_or_else(|| anyhow!("Unknown component type: `{}`", component_path)) } +/// Given a resource's type path, return the associated [`ReflectResource`] from the given +/// `type_registry` if possible. +fn get_reflect_resource<'r>( + type_registry: &'r TypeRegistry, + resource_path: &str, +) -> AnyhowResult<&'r ReflectResource> { + let resource_registration = get_resource_type_registration(type_registry, resource_path)?; + + resource_registration + .data::() + .ok_or_else(|| anyhow!("Resource `{}` isn't reflectable", resource_path)) +} + +/// Given a resource's type path, return the associated [`TypeRegistration`] from the given +/// `type_registry` if possible. 
+fn get_resource_type_registration<'r>( + type_registry: &'r TypeRegistry, + resource_path: &str, +) -> AnyhowResult<&'r TypeRegistration> { + type_registry + .get_with_type_path(resource_path) + .ok_or_else(|| anyhow!("Unknown resource type: `{}`", resource_path)) +} + #[cfg(test)] mod tests { /// A generic function that tests serialization and deserialization of any type @@ -1512,8 +1491,6 @@ mod tests { ); } use super::*; - use bevy_ecs::{component::Component, resource::Resource}; - use bevy_reflect::Reflect; #[test] fn serialization_tests() { @@ -1536,193 +1513,4 @@ mod tests { entity: Entity::from_raw(0), }); } - - #[test] - fn reflect_export_struct() { - #[derive(Reflect, Resource, Default, Deserialize, Serialize)] - #[reflect(Resource, Default, Serialize, Deserialize)] - struct Foo { - a: f32, - b: Option, - } - - let atr = AppTypeRegistry::default(); - { - let mut register = atr.write(); - register.register::(); - } - let type_registry = atr.read(); - let foo_registration = type_registry - .get(TypeId::of::()) - .expect("SHOULD BE REGISTERED") - .clone(); - let (_, schema) = export_type(&foo_registration); - println!("{}", &serde_json::to_string_pretty(&schema).unwrap()); - - assert!( - !schema.reflect_types.contains(&"Component".to_owned()), - "Should not be a component" - ); - assert!( - schema.reflect_types.contains(&"Resource".to_owned()), - "Should be a resource" - ); - let _ = schema.properties.get("a").expect("Missing `a` field"); - let _ = schema.properties.get("b").expect("Missing `b` field"); - assert!( - schema.required.contains(&"a".to_owned()), - "Field a should be required" - ); - assert!( - !schema.required.contains(&"b".to_owned()), - "Field b should not be required" - ); - } - - #[test] - fn reflect_export_enum() { - #[derive(Reflect, Component, Default, Deserialize, Serialize)] - #[reflect(Component, Default, Serialize, Deserialize)] - enum EnumComponent { - ValueOne(i32), - ValueTwo { - test: i32, - }, - #[default] - NoValue, - } - - let atr = AppTypeRegistry::default(); - { - let mut register = atr.write(); - register.register::(); - } - let type_registry = atr.read(); - let foo_registration = type_registry - .get(TypeId::of::()) - .expect("SHOULD BE REGISTERED") - .clone(); - let (_, schema) = export_type(&foo_registration); - assert!( - schema.reflect_types.contains(&"Component".to_owned()), - "Should be a component" - ); - assert!( - !schema.reflect_types.contains(&"Resource".to_owned()), - "Should not be a resource" - ); - assert!(schema.properties.is_empty(), "Should not have any field"); - assert!(schema.one_of.len() == 3, "Should have 3 possible schemas"); - } - - #[test] - fn reflect_export_struct_without_reflect_types() { - #[derive(Reflect, Component, Default, Deserialize, Serialize)] - enum EnumComponent { - ValueOne(i32), - ValueTwo { - test: i32, - }, - #[default] - NoValue, - } - - let atr = AppTypeRegistry::default(); - { - let mut register = atr.write(); - register.register::(); - } - let type_registry = atr.read(); - let foo_registration = type_registry - .get(TypeId::of::()) - .expect("SHOULD BE REGISTERED") - .clone(); - let (_, schema) = export_type(&foo_registration); - assert!( - !schema.reflect_types.contains(&"Component".to_owned()), - "Should not be a component" - ); - assert!( - !schema.reflect_types.contains(&"Resource".to_owned()), - "Should not be a resource" - ); - assert!(schema.properties.is_empty(), "Should not have any field"); - assert!(schema.one_of.len() == 3, "Should have 3 possible schemas"); - } - - #[test] - fn 
reflect_export_tuple_struct() { - #[derive(Reflect, Component, Default, Deserialize, Serialize)] - #[reflect(Component, Default, Serialize, Deserialize)] - struct TupleStructType(usize, i32); - - let atr = AppTypeRegistry::default(); - { - let mut register = atr.write(); - register.register::(); - } - let type_registry = atr.read(); - let foo_registration = type_registry - .get(TypeId::of::()) - .expect("SHOULD BE REGISTERED") - .clone(); - let (_, schema) = export_type(&foo_registration); - println!("{}", &serde_json::to_string_pretty(&schema).unwrap()); - assert!( - schema.reflect_types.contains(&"Component".to_owned()), - "Should be a component" - ); - assert!( - !schema.reflect_types.contains(&"Resource".to_owned()), - "Should not be a resource" - ); - assert!(schema.properties.is_empty(), "Should not have any field"); - assert!(schema.prefix_items.len() == 2, "Should have 2 prefix items"); - } - - #[test] - fn reflect_export_serialization_check() { - #[derive(Reflect, Resource, Default, Deserialize, Serialize)] - #[reflect(Resource, Default)] - struct Foo { - a: f32, - } - - let atr = AppTypeRegistry::default(); - { - let mut register = atr.write(); - register.register::(); - } - let type_registry = atr.read(); - let foo_registration = type_registry - .get(TypeId::of::()) - .expect("SHOULD BE REGISTERED") - .clone(); - let (_, schema) = export_type(&foo_registration); - let schema_as_value = serde_json::to_value(&schema).expect("Should serialize"); - let value = json!({ - "shortPath": "Foo", - "typePath": "bevy_remote::builtin_methods::tests::Foo", - "modulePath": "bevy_remote::builtin_methods::tests", - "crateName": "bevy_remote", - "reflectTypes": [ - "Resource", - "Default", - ], - "kind": "Struct", - "type": "object", - "additionalProperties": false, - "properties": { - "a": { - "type": { - "$ref": "#/$defs/f32" - } - }, - }, - "required": [ - "a" - ] - }); - assert_eq!(schema_as_value, value); - } } diff --git a/crates/bevy_remote/src/lib.rs b/crates/bevy_remote/src/lib.rs index 75ba79d58e..b21fb97bbb 100644 --- a/crates/bevy_remote/src/lib.rs +++ b/crates/bevy_remote/src/lib.rs @@ -102,7 +102,7 @@ //! in the ECS. Each of these methods uses the `bevy/` prefix, which is a namespace reserved for //! BRP built-in methods. //! -//! ### bevy/get +//! ### `bevy/get` //! //! Retrieve the values of one or more components from an entity. //! @@ -123,7 +123,7 @@ //! //! `result`: A map associating each type name to its value on the requested entity. //! -//! ### bevy/query +//! ### `bevy/query` //! //! Perform a query over components in the ECS, returning all matching entities and their associated //! component values. @@ -137,14 +137,14 @@ //! see _below_ example for a query to list all the type names in **your** project. //! - `option` (optional): An array of fully-qualified type names of components to fetch optionally. //! - `has` (optional): An array of fully-qualified type names of components whose presence will be -//! reported as boolean values. +//! reported as boolean values. //! - `filter` (optional): //! - `with` (optional): An array of fully-qualified type names of components that must be present //! on entities in order for them to be included in results. //! - `without` (optional): An array of fully-qualified type names of components that must *not* be //! present on entities in order for them to be included in results. -//! - `strict` (optional): A flag to enable strict mode which will fail if any one of the -//! components is not present or can not be reflected. 
Defaults to false. +//! - `strict` (optional): A flag to enable strict mode which will fail if any one of the components +//! is not present or can not be reflected. Defaults to false. //! //! `result`: An array, each of which is an object containing: //! - `entity`: The ID of a query-matching entity. @@ -155,7 +155,7 @@ //! //! //! -//! ### bevy/spawn +//! ### `bevy/spawn` //! //! Create a new entity with the provided components and return the resulting entity ID. //! @@ -165,7 +165,7 @@ //! `result`: //! - `entity`: The ID of the newly spawned entity. //! -//! ### bevy/destroy +//! ### `bevy/destroy` //! //! Despawn the entity with the given ID. //! @@ -174,7 +174,7 @@ //! //! `result`: null. //! -//! ### bevy/remove +//! ### `bevy/remove` //! //! Delete one or more components from an entity. //! @@ -184,7 +184,7 @@ //! //! `result`: null. //! -//! ### bevy/insert +//! ### `bevy/insert` //! //! Insert one or more components into an entity. //! @@ -207,7 +207,7 @@ //! //! `result`: null. //! -//! ### bevy/reparent +//! ### `bevy/reparent` //! //! Assign a new parent to one or more entities. //! @@ -218,7 +218,7 @@ //! //! `result`: null. //! -//! ### bevy/list +//! ### `bevy/list` //! //! List all registered components or all components present on an entity. //! @@ -230,7 +230,7 @@ //! //! `result`: An array of fully-qualified type names of components. //! -//! ### bevy/get+watch +//! ### `bevy/get+watch` //! //! Watch the values of one or more components from an entity. //! @@ -258,7 +258,7 @@ //! - `removed`: An array of fully-qualified type names of components removed from the entity //! in the last tick. //! -//! ### bevy/list+watch +//! ### `bevy/list+watch` //! //! Watch all components present on an entity. //! @@ -274,6 +274,52 @@ //! - `removed`: An array of fully-qualified type names of components removed from the entity //! in the last tick. //! +//! ### `bevy/get_resource` +//! +//! Extract the value of a given resource from the world. +//! +//! `params`: +//! - `resource`: The [fully-qualified type name] of the resource to get. +//! +//! `result`: +//! - `value`: The value of the resource in the world. +//! +//! ### `bevy/insert_resource` +//! +//! Insert the given resource into the world with the given value. +//! +//! `params`: +//! - `resource`: The [fully-qualified type name] of the resource to insert. +//! - `value`: The value of the resource to be inserted. +//! +//! `result`: null. +//! +//! ### `bevy/remove_resource` +//! +//! Remove the given resource from the world. +//! +//! `params` +//! - `resource`: The [fully-qualified type name] of the resource to remove. +//! +//! `result`: null. +//! +//! ### `bevy/mutate_resource` +//! +//! Mutate a field in a resource. +//! +//! `params`: +//! - `resource`: The [fully-qualified type name] of the resource to mutate. +//! - `path`: The path of the field within the resource. See +//! [`GetPath`](bevy_reflect::GetPath#syntax) for more information on formatting this string. +//! - `value`: The value to be inserted at `path`. +//! +//! `result`: null. +//! +//! ### `bevy/list_resources` +//! +//! List all reflectable registered resource types. This method has no parameters. +//! +//! `result`: An array of [fully-qualified type names] of registered resource types. //! //! ## Custom methods //! 
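Since the module docs above describe the new resource methods only abstractly, here is a sketch of the JSON-RPC payloads a client might send for two of them. The envelope follows the BRP conventions documented earlier; `my_game::audio::AudioSettings` and its `volume` field are hypothetical names, not types from this diff:

```rust
use serde_json::json;

fn main() {
    // `bevy/mutate_resource`: set one field of a reflectable resource.
    let mutate = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "bevy/mutate_resource",
        "params": {
            "resource": "my_game::audio::AudioSettings",
            "path": "volume",
            "value": 0.5
        }
    });

    // `bevy/get_resource`: read the whole resource back.
    let get = json!({
        "jsonrpc": "2.0",
        "id": 2,
        "method": "bevy/get_resource",
        "params": { "resource": "my_game::audio::AudioSettings" }
    });

    println!("{mutate}\n{get}");
}
```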
@@ -324,11 +370,11 @@ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ entity::Entity, resource::Resource, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs, ScheduleLabel, SystemSet}, + schedule::{IntoScheduleConfigs, ScheduleLabel, SystemSet}, system::{Commands, In, IntoSystem, ResMut, System, SystemId}, world::World, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_utils::prelude::default; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -337,6 +383,7 @@ use std::sync::RwLock; pub mod builtin_methods; #[cfg(feature = "http")] pub mod http; +pub mod schemas; const CHANNEL_SIZE: usize = 16; @@ -424,14 +471,14 @@ impl Default for RemotePlugin { builtin_methods::BRP_LIST_METHOD, builtin_methods::process_remote_list_request, ) - .with_method( - builtin_methods::BRP_REGISTRY_SCHEMA_METHOD, - builtin_methods::export_registry_types, - ) .with_method( builtin_methods::BRP_MUTATE_COMPONENT_METHOD, builtin_methods::process_remote_mutate_component_request, ) + .with_method( + builtin_methods::RPC_DISCOVER_METHOD, + builtin_methods::process_remote_list_methods_request, + ) .with_watching_method( builtin_methods::BRP_GET_AND_WATCH_METHOD, builtin_methods::process_remote_get_watching_request, @@ -440,6 +487,30 @@ impl Default for RemotePlugin { builtin_methods::BRP_LIST_AND_WATCH_METHOD, builtin_methods::process_remote_list_watching_request, ) + .with_method( + builtin_methods::BRP_GET_RESOURCE_METHOD, + builtin_methods::process_remote_get_resource_request, + ) + .with_method( + builtin_methods::BRP_INSERT_RESOURCE_METHOD, + builtin_methods::process_remote_insert_resource_request, + ) + .with_method( + builtin_methods::BRP_REMOVE_RESOURCE_METHOD, + builtin_methods::process_remote_remove_resource_request, + ) + .with_method( + builtin_methods::BRP_MUTATE_RESOURCE_METHOD, + builtin_methods::process_remote_mutate_resource_request, + ) + .with_method( + builtin_methods::BRP_LIST_RESOURCES_METHOD, + builtin_methods::process_remote_list_resources_request, + ) + .with_method( + builtin_methods::BRP_REGISTRY_SCHEMA_METHOD, + builtin_methods::export_registry_types, + ) } } @@ -487,7 +558,7 @@ impl Plugin for RemotePlugin { } /// Schedule that contains all systems to process Bevy Remote Protocol requests -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct RemoteLast; /// The systems sets of the [`RemoteLast`] schedule. @@ -565,6 +636,11 @@ impl RemoteMethods { pub fn get(&self, method: &str) -> Option<&RemoteMethodSystemId> { self.0.get(method) } + + /// Get a [`Vec`] with method names. + pub fn methods(&self) -> Vec { + self.0.keys().cloned().collect() + } } /// Holds the [`BrpMessage`]'s of all ongoing watching requests along with their handlers. @@ -718,6 +794,26 @@ impl BrpError { } } + /// Resource was not present in the world. + #[must_use] + pub fn resource_not_present(resource: &str) -> Self { + Self { + code: error_codes::RESOURCE_NOT_PRESENT, + message: format!("Resource `{resource}` not present in the world"), + data: None, + } + } + + /// An arbitrary resource error. Possibly related to reflection. + #[must_use] + pub fn resource_error(error: E) -> Self { + Self { + code: error_codes::RESOURCE_ERROR, + message: error.to_string(), + data: None, + } + } + /// An arbitrary internal error. #[must_use] pub fn internal(error: E) -> Self { @@ -772,6 +868,12 @@ pub mod error_codes { /// Cannot reparent an entity to itself. 
pub const SELF_REPARENT: i16 = -23404; + + /// Could not reflect or find resource. + pub const RESOURCE_ERROR: i16 = -23501; + + /// Could not find resource in the world. + pub const RESOURCE_NOT_PRESENT: i16 = -23502; } /// The result of a request. diff --git a/crates/bevy_remote/src/schemas/json_schema.rs b/crates/bevy_remote/src/schemas/json_schema.rs new file mode 100644 index 0000000000..3fcc588f92 --- /dev/null +++ b/crates/bevy_remote/src/schemas/json_schema.rs @@ -0,0 +1,543 @@ +//! Module with JSON Schema type for Bevy Registry Types. +//! It tries to follow this standard: +use bevy_ecs::reflect::{ReflectComponent, ReflectResource}; +use bevy_platform::collections::HashMap; +use bevy_reflect::{ + prelude::ReflectDefault, NamedField, OpaqueInfo, ReflectDeserialize, ReflectSerialize, + TypeInfo, TypeRegistration, VariantInfo, +}; +use core::any::TypeId; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Map, Value}; + +/// Exports schema info for a given type +pub fn export_type(reg: &TypeRegistration) -> (String, JsonSchemaBevyType) { + (reg.type_info().type_path().to_owned(), reg.into()) +} + +fn get_registered_reflect_types(reg: &TypeRegistration) -> Vec { + // Vec could be moved to allow registering more types by game maker. + let registered_reflect_types: [(TypeId, &str); 5] = [ + { (TypeId::of::(), "Component") }, + { (TypeId::of::(), "Resource") }, + { (TypeId::of::(), "Default") }, + { (TypeId::of::(), "Serialize") }, + { (TypeId::of::(), "Deserialize") }, + ]; + let mut result = Vec::new(); + for (id, name) in registered_reflect_types { + if reg.data_by_id(id).is_some() { + result.push(name.to_owned()); + } + } + result +} + +impl From<&TypeRegistration> for JsonSchemaBevyType { + fn from(reg: &TypeRegistration) -> Self { + let t = reg.type_info(); + let binding = t.type_path_table(); + + let short_path = binding.short_path(); + let type_path = binding.path(); + let mut typed_schema = JsonSchemaBevyType { + reflect_types: get_registered_reflect_types(reg), + short_path: short_path.to_owned(), + type_path: type_path.to_owned(), + crate_name: binding.crate_name().map(str::to_owned), + module_path: binding.module_path().map(str::to_owned), + ..Default::default() + }; + match t { + TypeInfo::Struct(info) => { + typed_schema.properties = info + .iter() + .map(|field| (field.name().to_owned(), field.ty().ref_type())) + .collect::>(); + typed_schema.required = info + .iter() + .filter(|field| !field.type_path().starts_with("core::option::Option")) + .map(|f| f.name().to_owned()) + .collect::>(); + typed_schema.additional_properties = Some(false); + typed_schema.schema_type = SchemaType::Object; + typed_schema.kind = SchemaKind::Struct; + } + TypeInfo::Enum(info) => { + typed_schema.kind = SchemaKind::Enum; + + let simple = info + .iter() + .all(|variant| matches!(variant, VariantInfo::Unit(_))); + if simple { + typed_schema.schema_type = SchemaType::String; + typed_schema.one_of = info + .iter() + .map(|variant| match variant { + VariantInfo::Unit(v) => v.name().into(), + _ => unreachable!(), + }) + .collect::>(); + } else { + typed_schema.schema_type = SchemaType::Object; + typed_schema.one_of = info + .iter() + .map(|variant| match variant { + VariantInfo::Struct(v) => json!({ + "type": "object", + "kind": "Struct", + "typePath": format!("{}::{}", type_path, v.name()), + "shortPath": v.name(), + "properties": v + .iter() + .map(|field| (field.name().to_owned(), field.ref_type())) + .collect::>(), + "additionalProperties": false, + "required": v + .iter() + 
.filter(|field| !field.type_path().starts_with("core::option::Option")) + .map(NamedField::name) + .collect::>(), + }), + VariantInfo::Tuple(v) => json!({ + "type": "array", + "kind": "Tuple", + "typePath": format!("{}::{}", type_path, v.name()), + "shortPath": v.name(), + "prefixItems": v + .iter() + .map(SchemaJsonReference::ref_type) + .collect::>(), + "items": false, + }), + VariantInfo::Unit(v) => json!({ + "typePath": format!("{}::{}", type_path, v.name()), + "shortPath": v.name(), + }), + }) + .collect::>(); + } + } + TypeInfo::TupleStruct(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::TupleStruct; + typed_schema.prefix_items = info + .iter() + .map(SchemaJsonReference::ref_type) + .collect::>(); + typed_schema.items = Some(false.into()); + } + TypeInfo::List(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::List; + typed_schema.items = info.item_ty().ref_type().into(); + } + TypeInfo::Array(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::Array; + typed_schema.items = info.item_ty().ref_type().into(); + } + TypeInfo::Map(info) => { + typed_schema.schema_type = SchemaType::Object; + typed_schema.kind = SchemaKind::Map; + typed_schema.key_type = info.key_ty().ref_type().into(); + typed_schema.value_type = info.value_ty().ref_type().into(); + } + TypeInfo::Tuple(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::Tuple; + typed_schema.prefix_items = info + .iter() + .map(SchemaJsonReference::ref_type) + .collect::>(); + typed_schema.items = Some(false.into()); + } + TypeInfo::Set(info) => { + typed_schema.schema_type = SchemaType::Set; + typed_schema.kind = SchemaKind::Set; + typed_schema.items = info.value_ty().ref_type().into(); + } + TypeInfo::Opaque(info) => { + typed_schema.schema_type = info.map_json_type(); + typed_schema.kind = SchemaKind::Value; + } + }; + typed_schema + } +} + +/// JSON Schema type for Bevy Registry Types +/// It tries to follow this standard: +/// +/// To take the full advantage from info provided by Bevy registry it provides extra fields +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "camelCase")] +pub struct JsonSchemaBevyType { + /// Bevy specific field, short path of the type. + pub short_path: String, + /// Bevy specific field, full path of the type. + pub type_path: String, + /// Bevy specific field, path of the module that type is part of. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub module_path: Option, + /// Bevy specific field, name of the crate that type is part of. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub crate_name: Option, + /// Bevy specific field, names of the types that type reflects. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub reflect_types: Vec, + /// Bevy specific field, [`TypeInfo`] type mapping. + pub kind: SchemaKind, + /// Bevy specific field, provided when [`SchemaKind`] `kind` field is equal to [`SchemaKind::Map`]. + /// + /// It contains type info of key of the Map. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub key_type: Option, + /// Bevy specific field, provided when [`SchemaKind`] `kind` field is equal to [`SchemaKind::Map`]. + /// + /// It contains type info of value of the Map. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub value_type: Option, + /// The type keyword is fundamental to JSON Schema. 
It specifies the data type for a schema. + #[serde(rename = "type")] + pub schema_type: SchemaType, + /// The behavior of this keyword depends on the presence and annotation results of "properties" + /// and "patternProperties" within the same schema object. + /// Validation with "additionalProperties" applies only to the child + /// values of instance names that do not appear in the annotation results of either "properties" or "patternProperties". + #[serde(skip_serializing_if = "Option::is_none", default)] + pub additional_properties: Option, + /// Validation succeeds if, for each name that appears in both the instance and as a name + /// within this keyword's value, the child instance for that name successfully validates + /// against the corresponding schema. + #[serde(skip_serializing_if = "HashMap::is_empty", default)] + pub properties: HashMap, + /// An object instance is valid against this keyword if every item in the array is the name of a property in the instance. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub required: Vec, + /// An instance validates successfully against this keyword if it validates successfully against exactly one schema defined by this keyword's value. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub one_of: Vec, + /// Validation succeeds if each element of the instance validates against the schema at the same position, if any. This keyword does not constrain the length of the array. If the array is longer than this keyword's value, this keyword validates only the prefix of matching length. + /// + /// This keyword produces an annotation value which is the largest index to which this keyword + /// applied a subschema. The value MAY be a boolean true if a subschema was applied to every + /// index of the instance, such as is produced by the "items" keyword. + /// This annotation affects the behavior of "items" and "unevaluatedItems". + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub prefix_items: Vec, + /// This keyword applies its subschema to all instance elements at indexes greater + /// than the length of the "prefixItems" array in the same schema object, + /// as reported by the annotation result of that "prefixItems" keyword. + /// If no such annotation result exists, "items" applies its subschema to all + /// instance array elements. + /// + /// If the "items" subschema is applied to any positions within the instance array, + /// it produces an annotation result of boolean true, indicating that all remaining + /// array elements have been evaluated against this keyword's subschema. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub items: Option, +} + +/// Kind of json schema, maps [`TypeInfo`] type +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +pub enum SchemaKind { + /// Struct + #[default] + Struct, + /// Enum type + Enum, + /// A key-value map + Map, + /// Array + Array, + /// List + List, + /// Fixed size collection of items + Tuple, + /// Fixed size collection of items with named fields + TupleStruct, + /// Set of unique values + Set, + /// Single value, eg. primitive types + Value, +} + +/// Type of json schema +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum SchemaType { + /// Represents a string value. + String, + + /// Represents a floating-point number. + Float, + + /// Represents an unsigned integer. + Uint, + + /// Represents a signed integer. 
+ Int, + + /// Represents an object with key-value pairs. + Object, + + /// Represents an array of values. + Array, + + /// Represents a boolean value (true or false). + Boolean, + + /// Represents a set of unique values. + Set, + + /// Represents a null value. + #[default] + Null, +} + +/// Helper trait for generating json schema reference +trait SchemaJsonReference { + /// Reference to another type in schema. + /// The value `$ref` is a URI-reference that is resolved against the schema. + fn ref_type(self) -> Value; +} + +/// Helper trait for mapping bevy type path into json schema type +pub trait SchemaJsonType { + /// Bevy Reflect type path + fn get_type_path(&self) -> &'static str; + + /// JSON Schema type keyword from Bevy reflect type path into + fn map_json_type(&self) -> SchemaType { + match self.get_type_path() { + "bool" => SchemaType::Boolean, + "u8" | "u16" | "u32" | "u64" | "u128" | "usize" => SchemaType::Uint, + "i8" | "i16" | "i32" | "i64" | "i128" | "isize" => SchemaType::Int, + "f32" | "f64" => SchemaType::Float, + "char" | "str" | "alloc::string::String" => SchemaType::String, + _ => SchemaType::Object, + } + } +} + +impl SchemaJsonType for OpaqueInfo { + fn get_type_path(&self) -> &'static str { + self.type_path() + } +} + +impl SchemaJsonReference for &bevy_reflect::Type { + fn ref_type(self) -> Value { + let path = self.path(); + json!({"type": json!({ "$ref": format!("#/$defs/{path}") })}) + } +} + +impl SchemaJsonReference for &bevy_reflect::UnnamedField { + fn ref_type(self) -> Value { + let path = self.type_path(); + json!({"type": json!({ "$ref": format!("#/$defs/{path}") })}) + } +} + +impl SchemaJsonReference for &NamedField { + fn ref_type(self) -> Value { + let type_path = self.type_path(); + json!({"type": json!({ "$ref": format!("#/$defs/{type_path}") }), "typePath": self.name()}) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bevy_ecs::{component::Component, reflect::AppTypeRegistry, resource::Resource}; + use bevy_reflect::Reflect; + + #[test] + fn reflect_export_struct() { + #[derive(Reflect, Resource, Default, Deserialize, Serialize)] + #[reflect(Resource, Default, Serialize, Deserialize)] + struct Foo { + a: f32, + b: Option, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + + assert!( + !schema.reflect_types.contains(&"Component".to_owned()), + "Should not be a component" + ); + assert!( + schema.reflect_types.contains(&"Resource".to_owned()), + "Should be a resource" + ); + let _ = schema.properties.get("a").expect("Missing `a` field"); + let _ = schema.properties.get("b").expect("Missing `b` field"); + assert!( + schema.required.contains(&"a".to_owned()), + "Field a should be required" + ); + assert!( + !schema.required.contains(&"b".to_owned()), + "Field b should not be required" + ); + } + + #[test] + fn reflect_export_enum() { + #[derive(Reflect, Component, Default, Deserialize, Serialize)] + #[reflect(Component, Default, Serialize, Deserialize)] + enum EnumComponent { + ValueOne(i32), + ValueTwo { + test: i32, + }, + #[default] + NoValue, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + 
.clone(); + let (_, schema) = export_type(&foo_registration); + assert!( + schema.reflect_types.contains(&"Component".to_owned()), + "Should be a component" + ); + assert!( + !schema.reflect_types.contains(&"Resource".to_owned()), + "Should not be a resource" + ); + assert!(schema.properties.is_empty(), "Should not have any field"); + assert!(schema.one_of.len() == 3, "Should have 3 possible schemas"); + } + + #[test] + fn reflect_export_struct_without_reflect_types() { + #[derive(Reflect, Component, Default, Deserialize, Serialize)] + enum EnumComponent { + ValueOne(i32), + ValueTwo { + test: i32, + }, + #[default] + NoValue, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + assert!( + !schema.reflect_types.contains(&"Component".to_owned()), + "Should not be a component" + ); + assert!( + !schema.reflect_types.contains(&"Resource".to_owned()), + "Should not be a resource" + ); + assert!(schema.properties.is_empty(), "Should not have any field"); + assert!(schema.one_of.len() == 3, "Should have 3 possible schemas"); + } + + #[test] + fn reflect_export_tuple_struct() { + #[derive(Reflect, Component, Default, Deserialize, Serialize)] + #[reflect(Component, Default, Serialize, Deserialize)] + struct TupleStructType(usize, i32); + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + assert!( + schema.reflect_types.contains(&"Component".to_owned()), + "Should be a component" + ); + assert!( + !schema.reflect_types.contains(&"Resource".to_owned()), + "Should not be a resource" + ); + assert!(schema.properties.is_empty(), "Should not have any field"); + assert!(schema.prefix_items.len() == 2, "Should have 2 prefix items"); + } + + #[test] + fn reflect_export_serialization_check() { + #[derive(Reflect, Resource, Default, Deserialize, Serialize)] + #[reflect(Resource, Default)] + struct Foo { + a: f32, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + let schema_as_value = serde_json::to_value(&schema).expect("Should serialize"); + let value = json!({ + "shortPath": "Foo", + "typePath": "bevy_remote::schemas::json_schema::tests::Foo", + "modulePath": "bevy_remote::schemas::json_schema::tests", + "crateName": "bevy_remote", + "reflectTypes": [ + "Resource", + "Default", + ], + "kind": "Struct", + "type": "object", + "additionalProperties": false, + "properties": { + "a": { + "type": { + "$ref": "#/$defs/f32" + } + }, + }, + "required": [ + "a" + ] + }); + assert_eq!(schema_as_value, value); + } +} diff --git a/crates/bevy_remote/src/schemas/mod.rs b/crates/bevy_remote/src/schemas/mod.rs new file mode 100644 index 0000000000..7104fd5547 --- /dev/null +++ b/crates/bevy_remote/src/schemas/mod.rs @@ -0,0 +1,4 @@ +//! 
Module with schemas used for various BRP endpoints + +pub mod json_schema; +pub mod open_rpc; diff --git a/crates/bevy_remote/src/schemas/open_rpc.rs b/crates/bevy_remote/src/schemas/open_rpc.rs new file mode 100644 index 0000000000..0ffda36bc3 --- /dev/null +++ b/crates/bevy_remote/src/schemas/open_rpc.rs @@ -0,0 +1,118 @@ +//! Module with trimmed down `OpenRPC` document structs. +//! It tries to follow this standard: +use bevy_platform::collections::HashMap; +use bevy_utils::default; +use serde::{Deserialize, Serialize}; + +use crate::RemoteMethods; + +use super::json_schema::JsonSchemaBevyType; + +/// Represents an `OpenRPC` document as defined by the `OpenRPC` specification. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OpenRpcDocument { + /// The version of the `OpenRPC` specification being used. + pub openrpc: String, + /// Informational metadata about the document. + pub info: InfoObject, + /// List of RPC methods defined in the document. + pub methods: Vec, + /// Optional list of server objects that provide the API endpoint details. + pub servers: Option>, +} + +/// Contains metadata information about the `OpenRPC` document. +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct InfoObject { + /// The title of the API or document. + pub title: String, + /// The version of the API. + pub version: String, + /// An optional description providing additional details about the API. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// A collection of custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +impl Default for InfoObject { + fn default() -> Self { + Self { + title: "Bevy Remote Protocol".to_owned(), + version: env!("CARGO_PKG_VERSION").to_owned(), + description: None, + extensions: Default::default(), + } + } +} + +/// Describes a server hosting the API as specified in the `OpenRPC` document. +#[derive(Serialize, Deserialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct ServerObject { + /// The name of the server. + pub name: String, + /// The URL endpoint of the server. + pub url: String, + /// An optional description of the server. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// Additional custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +/// Represents an RPC method in the `OpenRPC` document. +#[derive(Serialize, Deserialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct MethodObject { + /// The method name (e.g., "/bevy/get") + pub name: String, + /// An optional short summary of the method. + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + /// An optional detailed description of the method. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// Parameters for the RPC method + #[serde(default)] + pub params: Vec, + // /// The expected result of the method + // #[serde(skip_serializing_if = "Option::is_none")] + // pub result: Option, + /// Additional custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +/// Represents an RPC method parameter in the `OpenRPC` document. 
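Before the `Parameter` struct that follows, a usage sketch showing how these document types might be assembled by hand. The module path, the element types of `methods`/`servers`, and the OpenRPC version string are assumptions for illustration, not something this diff pins down.

```rust
use bevy_remote::schemas::open_rpc::{InfoObject, MethodObject, OpenRpcDocument};

// Hand-built document. `InfoObject::default()` supplies the
// "Bevy Remote Protocol" title and the crate version.
fn sample_document() -> OpenRpcDocument {
    OpenRpcDocument {
        openrpc: "1.3.2".to_owned(), // assumed spec version string
        info: InfoObject::default(),
        methods: vec![MethodObject {
            name: "/bevy/get".to_owned(), // example name from the doc comment above
            ..Default::default()
        }],
        servers: None,
    }
}
```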
+#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct Parameter { + /// Parameter name + pub name: String, + /// Parameter description + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// JSON schema describing the parameter + pub schema: JsonSchemaBevyType, + /// Additional custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +impl From<&RemoteMethods> for Vec { + fn from(value: &RemoteMethods) -> Self { + value + .methods() + .iter() + .map(|e| MethodObject { + name: e.to_owned(), + ..default() + }) + .collect() + } +} diff --git a/crates/bevy_render/Cargo.toml b/crates/bevy_render/Cargo.toml index 1f349202b6..5da61a57dd 100644 --- a/crates/bevy_render/Cargo.toml +++ b/crates/bevy_render/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_render" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides rendering functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,9 +9,20 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] +# Bevy users should _never_ turn this feature on. +# +# Bevy/wgpu developers can turn this feature on to test a newer version of wgpu without needing to also update naga_oil. +# +# When turning this feature on, you can add the following to bevy/Cargo.toml (not this file), and then run `cargo update`: +# [patch.crates-io] +# wgpu = { git = "https://github.com/gfx-rs/wgpu", rev = "..." } +# wgpu-core = { git = "https://github.com/gfx-rs/wgpu", rev = "..." } +# wgpu-hal = { git = "https://github.com/gfx-rs/wgpu", rev = "..." } +# wgpu-types = { git = "https://github.com/gfx-rs/wgpu", rev = "..." } +decoupled_naga = [] + # Texture formats (require more than just image support) basis-universal = ["bevy_image/basis-universal"] -dds = ["bevy_image/dds"] exr = ["bevy_image/exr"] hdr = ["bevy_image/hdr"] ktx2 = ["dep:ktx2", "bevy_image/ktx2"] @@ -20,6 +31,7 @@ multi_threaded = ["bevy_tasks/multi_threaded"] shader_format_glsl = ["naga/glsl-in", "naga/wgsl-out", "naga_oil/glsl"] shader_format_spirv = ["wgpu/spirv", "naga/spv-in", "naga/spv-out"] +shader_format_wesl = ["wesl"] # Enable SPIR-V shader passthrough spirv_shader_passthrough = ["wgpu/spirv"] @@ -29,11 +41,10 @@ spirv_shader_passthrough = ["wgpu/spirv"] statically-linked-dxc = ["wgpu/static-dxc"] trace = ["profiling"] -tracing-tracy = [] +tracing-tracy = ["dep:tracy-client"] ci_limits = [] webgl = ["wgpu/webgl"] webgpu = ["wgpu/webgpu"] -ios_simulator = [] detailed_trace = [] [dependencies] @@ -49,9 +60,7 @@ bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_encase_derive = { path = "../bevy_encase_derive", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_render_macros = { path = "macros", version = "0.16.0-dev" } bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } @@ -60,7 +69,7 @@ bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } bevy_mesh = { path = "../bevy_mesh", version = "0.16.0-dev" } -bevy_platform_support 
= { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", "serialize", ] } @@ -100,8 +109,11 @@ smallvec = { version = "1.11", features = ["const_new"] } offset-allocator = "0.2" variadics_please = "1.1" tracing = { version = "0.1", default-features = false, features = ["std"] } +tracy-client = { version = "0.18.0", optional = true } indexmap = { version = "2" } fixedbitset = { version = "0.5" } +bitflags = "2" +wesl = { version = "0.1.2", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] # Omit the `glsl` feature in non-WebAssembly by default. @@ -125,6 +137,19 @@ web-sys = { version = "0.3.67", features = [ 'Window', ] } wasm-bindgen = "0.2" +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } [target.'cfg(all(target_arch = "wasm32", target_feature = "atomics"))'.dependencies] send_wrapper = "0.6.0" diff --git a/crates/bevy_render/macros/Cargo.toml b/crates/bevy_render/macros/Cargo.toml index 237cc516c5..c3fc40b23e 100644 --- a/crates/bevy_render/macros/Cargo.toml +++ b/crates/bevy_render/macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_render_macros" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Derive implementations for bevy_render" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_render/macros/src/as_bind_group.rs b/crates/bevy_render/macros/src/as_bind_group.rs index d5ce58d46c..4252929170 100644 --- a/crates/bevy_render/macros/src/as_bind_group.rs +++ b/crates/bevy_render/macros/src/as_bind_group.rs @@ -3,10 +3,11 @@ use proc_macro::TokenStream; use proc_macro2::{Ident, Span}; use quote::{quote, ToTokens}; use syn::{ + parenthesized, parse::{Parse, ParseStream}, punctuated::Punctuated, - token::Comma, - Data, DataStruct, Error, Fields, Lit, LitInt, LitStr, Meta, MetaList, Result, + token::{Comma, DotDot}, + Data, DataStruct, Error, Fields, LitInt, LitStr, Meta, MetaList, Result, }; const UNIFORM_ATTRIBUTE_NAME: Symbol = Symbol("uniform"); @@ -16,6 +17,12 @@ const SAMPLER_ATTRIBUTE_NAME: Symbol = Symbol("sampler"); const STORAGE_ATTRIBUTE_NAME: Symbol = Symbol("storage"); const BIND_GROUP_DATA_ATTRIBUTE_NAME: Symbol = Symbol("bind_group_data"); const BINDLESS_ATTRIBUTE_NAME: Symbol = Symbol("bindless"); +const DATA_ATTRIBUTE_NAME: Symbol = Symbol("data"); +const BINDING_ARRAY_MODIFIER_NAME: Symbol = Symbol("binding_array"); +const LIMIT_MODIFIER_NAME: Symbol = Symbol("limit"); +const INDEX_TABLE_MODIFIER_NAME: Symbol = Symbol("index_table"); +const RANGE_MODIFIER_NAME: Symbol = Symbol("range"); +const BINDING_MODIFIER_NAME: Symbol = Symbol("binding"); #[derive(Copy, Clone, Debug)] enum BindingType { @@ -39,6 +46,17 @@ enum BindingState<'a> { }, } +enum BindlessSlabResourceLimitAttr { + Auto, + Limit(LitInt), +} + +// The `bindless(index_table(range(M..N)))` attribute. 
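Before the `BindlessIndexTableRangeAttr` struct below, a syntax-only sketch of the structure-level attributes the parser in this file accepts after this change. The material and uniform types are placeholders, the numeric bindings are illustrative, and the `AsBindGroupShaderType` conversion a structure-level `#[uniform]` needs is not shown, so this is not a complete, compiling material.

```rust
use bevy_asset::Handle;
use bevy_image::Image;
use bevy_render::render_resource::AsBindGroup;

#[derive(AsBindGroup)]
// `limit(16)` caps the bindless slab; omitting it selects `Auto`.
// `index_table(range(5..8), binding(9))` relocates the bindless index table.
#[bindless(limit(16), index_table(range(5..8), binding(9)))]
// Structure-level uniform: index 0 is the bindless-index-table slot, while
// `binding_array(10)` is the wgpu binding of the buffer binding array.
#[uniform(0, ExampleGpuMaterial, binding_array(10))]
struct ExampleMaterial {
    #[texture(1)]
    #[sampler(2)]
    color_texture: Option<Handle<Image>>,
}
```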
+struct BindlessIndexTableRangeAttr { + start: LitInt, + end: LitInt, +} + pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { let manifest = BevyManifest::shared(); let render_path = manifest.get_path("bevy_render"); @@ -48,14 +66,22 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { let mut binding_states: Vec = Vec::new(); let mut binding_impls = Vec::new(); - let mut binding_layouts = Vec::new(); + let mut bindless_binding_layouts = Vec::new(); + let mut non_bindless_binding_layouts = Vec::new(); + let mut bindless_resource_types = Vec::new(); + let mut bindless_buffer_descriptors = Vec::new(); let mut attr_prepared_data_ident = None; + // After the first attribute pass, this will be `None` if the object isn't + // bindless and `Some` if it is. let mut attr_bindless_count = None; + let mut attr_bindless_index_table_range = None; + let mut attr_bindless_index_table_binding = None; // `actual_bindless_slot_count` holds the actual number of bindless slots // per bind group, taking into account whether the current platform supports // bindless resources. let actual_bindless_slot_count = Ident::new("actual_bindless_slot_count", Span::call_site()); + let bind_group_layout_entries = Ident::new("bind_group_layout_entries", Span::call_site()); // The `BufferBindingType` and corresponding `BufferUsages` used for // uniforms. We need this because bindless uniforms don't exist, so in @@ -63,7 +89,7 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { let uniform_binding_type = Ident::new("uniform_binding_type", Span::call_site()); let uniform_buffer_usages = Ident::new("uniform_buffer_usages", Span::call_site()); - // Read struct-level attributes + // Read struct-level attributes, first pass. for attr in &ast.attrs { if let Some(attr_ident) = attr.path().get_ident() { if attr_ident == BIND_GROUP_DATA_ATTRIBUTE_NAME { @@ -72,35 +98,216 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { { attr_prepared_data_ident = Some(prepared_data_ident); } - } else if attr_ident == UNIFORM_ATTRIBUTE_NAME { - let (binding_index, converted_shader_type) = get_uniform_binding_attr(attr)?; - binding_impls.push(quote! {{ - use #render_path::render_resource::AsBindGroupShaderType; - let mut buffer = #render_path::render_resource::encase::UniformBuffer::new(Vec::new()); - let converted: #converted_shader_type = self.as_bind_group_shader_type(&images); - buffer.write(&converted).unwrap(); - ( - #binding_index, - #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( - &#render_path::render_resource::BufferInitDescriptor { - label: None, - usage: #uniform_buffer_usages, - contents: buffer.as_ref(), - }, - )) - ) - }}); + } else if attr_ident == BINDLESS_ATTRIBUTE_NAME { + attr_bindless_count = Some(BindlessSlabResourceLimitAttr::Auto); + if let Meta::List(_) = attr.meta { + // Parse bindless features. 
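For orientation while reading this pass: the slab limit it records maps onto the runtime enum used by the generated `bindless_slot_count()` further down. A small sketch of that mapping, assuming (as the generated code there suggests) that the `Custom` variant carries the numeric limit:

```rust
use bevy_render::render_resource::BindlessSlabResourceLimit;

// `#[bindless]`            -> Auto (let the engine pick the slab size)
// `#[bindless(limit(16))]` -> Custom(16)
fn slot_count(limit: Option<u32>) -> BindlessSlabResourceLimit {
    match limit {
        None => BindlessSlabResourceLimit::Auto,
        Some(n) => BindlessSlabResourceLimit::Custom(n),
    }
}
```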
+ attr.parse_nested_meta(|submeta| { + if submeta.path.is_ident(&LIMIT_MODIFIER_NAME) { + let content; + parenthesized!(content in submeta.input); + let lit: LitInt = content.parse()?; - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #render_path::render_resource::ShaderStages::all(), - ty: #render_path::render_resource::BindingType::Buffer { - ty: #uniform_binding_type, - has_dynamic_offset: false, - min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), - }, - count: #actual_bindless_slot_count, + attr_bindless_count = Some(BindlessSlabResourceLimitAttr::Limit(lit)); + return Ok(()); + } + + if submeta.path.is_ident(&INDEX_TABLE_MODIFIER_NAME) { + submeta.parse_nested_meta(|subsubmeta| { + if subsubmeta.path.is_ident(&RANGE_MODIFIER_NAME) { + let content; + parenthesized!(content in subsubmeta.input); + let start: LitInt = content.parse()?; + content.parse::()?; + let end: LitInt = content.parse()?; + attr_bindless_index_table_range = + Some(BindlessIndexTableRangeAttr { start, end }); + return Ok(()); + } + + if subsubmeta.path.is_ident(&BINDING_MODIFIER_NAME) { + let content; + parenthesized!(content in subsubmeta.input); + let lit: LitInt = content.parse()?; + + attr_bindless_index_table_binding = Some(lit); + return Ok(()); + } + + Err(Error::new_spanned( + attr, + "Expected `range(M..N)` or `binding(N)`", + )) + })?; + return Ok(()); + } + + Err(Error::new_spanned( + attr, + "Expected `limit` or `index_table`", + )) + })?; + } + } + } + } + + // Read struct-level attributes, second pass. + for attr in &ast.attrs { + if let Some(attr_ident) = attr.path().get_ident() { + if attr_ident == UNIFORM_ATTRIBUTE_NAME || attr_ident == DATA_ATTRIBUTE_NAME { + let UniformBindingAttr { + binding_type, + binding_index, + converted_shader_type, + binding_array: binding_array_binding, + } = get_uniform_binding_attr(attr)?; + match binding_type { + UniformBindingAttrType::Uniform => { + binding_impls.push(quote! {{ + use #render_path::render_resource::AsBindGroupShaderType; + let mut buffer = #render_path::render_resource::encase::UniformBuffer::new(Vec::new()); + let converted: #converted_shader_type = self.as_bind_group_shader_type(&images); + buffer.write(&converted).unwrap(); + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( + &#render_path::render_resource::BufferInitDescriptor { + label: None, + usage: #uniform_buffer_usages, + contents: buffer.as_ref(), + }, + )) + ) + }}); + + match (&binding_array_binding, &attr_bindless_count) { + (&None, &Some(_)) => { + return Err(Error::new_spanned( + attr, + "Must specify `binding_array(...)` with `#[uniform]` if the \ + object is bindless", + )); + } + (&Some(_), &None) => { + return Err(Error::new_spanned( + attr, + "`binding_array(...)` with `#[uniform]` requires the object to \ + be bindless", + )); + } + _ => {} + } + + let binding_array_binding = binding_array_binding.unwrap_or(0); + bindless_binding_layouts.push(quote! 
{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_array_binding, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), + }, + count: #actual_bindless_slot_count, + } + ); + }); + + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + quote! { #render_path::render_resource::BindlessResourceType::Buffer }, + ); + } + + UniformBindingAttrType::Data => { + binding_impls.push(quote! {{ + use #render_path::render_resource::AsBindGroupShaderType; + use #render_path::render_resource::encase::{ShaderType, internal::WriteInto}; + let mut buffer: Vec = Vec::new(); + let converted: #converted_shader_type = self.as_bind_group_shader_type(&images); + converted.write_into( + &mut #render_path::render_resource::encase::internal::Writer::new( + &converted, + &mut buffer, + 0, + ).unwrap(), + ); + let min_size = <#converted_shader_type as #render_path::render_resource::ShaderType>::min_size().get() as usize; + while buffer.len() < min_size { + buffer.push(0); + } + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Data( + #render_path::render_resource::OwnedData(buffer) + ) + ) + }}); + + let binding_array_binding = binding_array_binding.unwrap_or(0); + bindless_binding_layouts.push(quote! { + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_array_binding, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), + }, + count: None, + } + ); + }); + + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + quote! { #render_path::render_resource::BindlessResourceType::DataBuffer }, + ); + } + } + + // Push the non-bindless binding layout. + + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), + }, + count: None, + } + ); + }); + + bindless_buffer_descriptors.push(quote! { + #render_path::render_resource::BindlessBufferDescriptor { + // Note that, because this is bindless, *binding + // index* here refers to the index in the + // bindless index table (`bindless_index`), and + // the actual binding number is the *binding + // array binding*. 
+ binding_number: #render_path::render_resource::BindingNumber( + #binding_array_binding + ), + bindless_index: + #render_path::render_resource::BindlessIndex(#binding_index), + size: Some( + < + #converted_shader_type as + #render_path::render_resource::ShaderType + >::min_size().get() as usize + ), } }); @@ -109,12 +316,6 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { binding_states.resize(required_len, BindingState::Free); } binding_states[binding_index as usize] = BindingState::OccupiedConvertedUniform; - } else if attr_ident == BINDLESS_ATTRIBUTE_NAME { - if let Ok(count_lit) = - attr.parse_args_with(|input: ParseStream| input.parse::()) - { - attr_bindless_count = Some(count_lit); - } } } } @@ -135,7 +336,7 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { // Count the number of sampler fields needed. We might have to disable // bindless if bindless arrays take the GPU over the maximum number of // samplers. - let mut sampler_binding_count = 0; + let mut sampler_binding_count: u32 = 0; // Read field-level attributes for field in fields { @@ -225,11 +426,21 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { match binding_type { BindingType::Uniform => { + if attr_bindless_count.is_some() { + return Err(Error::new_spanned( + attr, + "Only structure-level `#[uniform]` attributes are supported in \ + bindless mode", + )); + } + // uniform codegen is deferred to account for combined uniform bindings } + BindingType::Storage => { let StorageAttrs { visibility, + binding_array: binding_array_binding, read_only, buffer, } = get_storage_binding_attr(nested_meta_items)?; @@ -259,20 +470,78 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { }); } - binding_layouts.push(quote! { - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::Buffer { - ty: #render_path::render_resource::BufferBindingType::Storage { read_only: #read_only }, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: #actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote! { + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::Buffer { + ty: #render_path::render_resource::BufferBindingType::Storage { read_only: #read_only }, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: #actual_bindless_slot_count, + } + ); }); + + if let Some(binding_array_binding) = binding_array_binding { + // Add the storage buffer to the `BindlessResourceType` list + // in the bindless descriptor. + let bindless_resource_type = quote! { + #render_path::render_resource::BindlessResourceType::Buffer + }; + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + bindless_resource_type, + ); + + // Push the buffer descriptor. + bindless_buffer_descriptors.push(quote! { + #render_path::render_resource::BindlessBufferDescriptor { + // Note that, because this is bindless, *binding + // index* here refers to the index in the bindless + // index table (`bindless_index`), and the actual + // binding number is the *binding array binding*. + binding_number: #render_path::render_resource::BindingNumber( + #binding_array_binding + ), + bindless_index: + #render_path::render_resource::BindlessIndex(#binding_index), + size: None, + } + }); + + // Declare the binding array. 
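Before the binding-array layout declaration that follows: to make the binding-number-versus-bindless-index distinction noted in this hunk concrete, here is a hand-written equivalent of the descriptor the macro would emit for a hypothetical `#[storage(3, read_only, binding_array(11))]` field. The type paths come from the generated code; constructing them directly here is purely illustrative.

```rust
use bevy_render::render_resource::{BindingNumber, BindlessBufferDescriptor, BindlessIndex};

// 11 is the wgpu binding of the storage-buffer binding array, while 3 is the
// slot in the bindless index table, not a wgpu binding.
fn example_storage_descriptor() -> BindlessBufferDescriptor {
    BindlessBufferDescriptor {
        binding_number: BindingNumber(11),
        bindless_index: BindlessIndex(3),
        size: None, // storage buffers are unsized here; uniforms pass `min_size()`
    }
}
```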
+ bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_array_binding, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #render_path::render_resource::BufferBindingType::Storage { + read_only: #read_only + }, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: #actual_bindless_slot_count, + } + ); + }); + } } + BindingType::StorageTexture => { + if attr_bindless_count.is_some() { + return Err(Error::new_spanned( + attr, + "Storage textures are unsupported in bindless mode", + )); + } + let StorageTextureAttrs { dimension, image_format, @@ -289,7 +558,7 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { binding_impls.insert(0, quote! { ( #binding_index, #render_path::render_resource::OwnedBindingResource::TextureView( - #dimension, + #render_path::render_resource::#dimension, { let handle: Option<&#asset_path::Handle<#image_path::Image>> = (&self.#field_name).into(); if let Some(handle) = handle { @@ -302,19 +571,22 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }); - binding_layouts.push(quote! { - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::StorageTexture { - access: #render_path::render_resource::StorageTextureAccess::#access, - format: #render_path::render_resource::TextureFormat::#image_format, - view_dimension: #render_path::render_resource::#dimension, - }, - count: #actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote! { + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::StorageTexture { + access: #render_path::render_resource::StorageTextureAccess::#access, + format: #render_path::render_resource::TextureFormat::#image_format, + view_dimension: #render_path::render_resource::#dimension, + }, + count: #actual_bindless_slot_count, + } + ); }); } + BindingType::Texture => { let TextureAttrs { dimension, @@ -348,19 +620,64 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { sampler_binding_count += 1; - binding_layouts.push(quote! { - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::Texture { - multisampled: #multisampled, - sample_type: #render_path::render_resource::#sample_type, - view_dimension: #render_path::render_resource::#dimension, - }, - count: #actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote! { + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::Texture { + multisampled: #multisampled, + sample_type: #render_path::render_resource::#sample_type, + view_dimension: #render_path::render_resource::#dimension, + }, + count: #actual_bindless_slot_count, + } + ); }); + + let bindless_resource_type = match *dimension { + BindingTextureDimension::D1 => { + quote! { + #render_path::render_resource::BindlessResourceType::Texture1d + } + } + BindingTextureDimension::D2 => { + quote! 
{ + #render_path::render_resource::BindlessResourceType::Texture2d + } + } + BindingTextureDimension::D2Array => { + quote! { + #render_path::render_resource::BindlessResourceType::Texture2dArray + } + } + BindingTextureDimension::Cube => { + quote! { + #render_path::render_resource::BindlessResourceType::TextureCube + } + } + BindingTextureDimension::CubeArray => { + quote! { + #render_path::render_resource::BindlessResourceType::TextureCubeArray + } + } + BindingTextureDimension::D3 => { + quote! { + #render_path::render_resource::BindlessResourceType::Texture3d + } + } + }; + + // Add the texture to the `BindlessResourceType` list in the + // bindless descriptor. + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + bindless_resource_type, + ); } + BindingType::Sampler => { let SamplerAttrs { sampler_binding_type, @@ -394,7 +711,10 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { binding_impls.insert(0, quote! { ( #binding_index, - #render_path::render_resource::OwnedBindingResource::Sampler({ + #render_path::render_resource::OwnedBindingResource::Sampler( + // TODO: Support other types. + #render_path::render_resource::WgpuSamplerBindingType::Filtering, + { let handle: Option<&#asset_path::Handle<#image_path::Image>> = (&self.#field_name).into(); if let Some(handle) = handle { let image = images.get(handle).ok_or_else(|| #render_path::render_resource::AsBindGroupError::RetryNextUpdate)?; @@ -426,14 +746,29 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { sampler_binding_count += 1; - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::Sampler(#render_path::render_resource::#sampler_binding_type), - count: #actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::Sampler(#render_path::render_resource::#sampler_binding_type), + count: #actual_bindless_slot_count, + } + ); }); + + // Add the sampler to the `BindlessResourceType` list in the + // bindless descriptor. + // + // TODO: Support other types of samplers. + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + quote! 
{ + #render_path::render_resource::BindlessResourceType::SamplerFiltering + }, + ); } } } @@ -495,17 +830,19 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }}); - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #render_path::render_resource::ShaderStages::all(), - ty: #render_path::render_resource::BindingType::Buffer { - ty: #uniform_binding_type, - has_dynamic_offset: false, - min_binding_size: Some(<#field_ty as #render_path::render_resource::ShaderType>::min_size()), - }, - count: actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#field_ty as #render_path::render_resource::ShaderType>::min_size()), + }, + count: #actual_bindless_slot_count, + } + ); }); // multi-field uniform bindings for a given index require an intermediate struct to derive ShaderType } else { @@ -541,8 +878,8 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }}); - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push(#render_path::render_resource::BindGroupLayoutEntry { binding: #binding_index, visibility: #render_path::render_resource::ShaderStages::all(), ty: #render_path::render_resource::BindingType::Buffer { @@ -550,8 +887,8 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { has_dynamic_offset: false, min_binding_size: Some(<#uniform_struct_name as #render_path::render_resource::ShaderType>::min_size()), }, - count: actual_bindless_slot_count, - } + count: #actual_bindless_slot_count, + }); }); } } @@ -571,46 +908,134 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { // Calculate the number of samplers that we need, so that we don't go over // the limit on certain platforms. See // https://github.com/bevyengine/bevy/issues/16988. - let samplers_needed = match attr_bindless_count { - Some(Lit::Int(ref bindless_count)) => match bindless_count.base10_parse::() { - Ok(bindless_count) => sampler_binding_count * bindless_count, - Err(_) => 0, - }, - _ => 0, + let bindless_count_syntax = match attr_bindless_count { + Some(BindlessSlabResourceLimitAttr::Auto) => { + quote! { #render_path::render_resource::AUTO_BINDLESS_SLAB_RESOURCE_LIMIT } + } + Some(BindlessSlabResourceLimitAttr::Limit(ref count)) => { + quote! { #count } + } + None => quote! { 0 }, + }; + + // Calculate the actual bindless index table range, taking the + // `#[bindless(index_table(range(M..N)))]` attribute into account. + let bindless_index_table_range = match attr_bindless_index_table_range { + None => { + let resource_count = bindless_resource_types.len() as u32; + quote! { + #render_path::render_resource::BindlessIndex(0).. + #render_path::render_resource::BindlessIndex(#resource_count) + } + } + Some(BindlessIndexTableRangeAttr { start, end }) => { + quote! { + #render_path::render_resource::BindlessIndex(#start).. + #render_path::render_resource::BindlessIndex(#end) + } + } + }; + + // Calculate the actual binding number of the bindless index table, taking + // the `#[bindless(index_table(binding(B)))]` into account. 
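The range selection just above (before the binding-number calculation that follows) boils down to this helper, extracted purely for illustration; the macro emits the equivalent inline as a token stream rather than calling a function.

```rust
use core::ops::Range;

// `explicit` carries `#[bindless(index_table(range(M..N)))]` when it was given;
// otherwise the table covers one entry per bindless resource type.
fn index_table_range(explicit: Option<Range<u32>>, resource_count: u32) -> Range<u32> {
    explicit.unwrap_or(0..resource_count)
}
```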
+ let bindless_index_table_binding_number = match attr_bindless_index_table_binding { + None => quote! { #render_path::render_resource::BindingNumber(0) }, + Some(binding_number) => { + quote! { #render_path::render_resource::BindingNumber(#binding_number) } + } }; // Calculate the actual number of bindless slots, taking hardware // limitations into account. - let (bindless_slot_count, actual_bindless_slot_count_declaration) = match attr_bindless_count { - Some(bindless_count) => ( - quote! { - fn bindless_slot_count() -> Option { - Some(#bindless_count) - } - - fn bindless_supported(render_device: &#render_path::renderer::RenderDevice) -> bool { - render_device.features().contains( - #render_path::settings::WgpuFeatures::BUFFER_BINDING_ARRAY | - #render_path::settings::WgpuFeatures::TEXTURE_BINDING_ARRAY - ) && - render_device.limits().max_storage_buffers_per_shader_stage > 0 && - render_device.limits().max_samplers_per_shader_stage >= #samplers_needed - } - }, - quote! { - let #actual_bindless_slot_count = if Self::bindless_supported(render_device) && - !force_no_bindless { - ::core::num::NonZeroU32::new(#bindless_count) - } else { - None + let (bindless_slot_count, actual_bindless_slot_count_declaration, bindless_descriptor_syntax) = + match attr_bindless_count { + Some(ref bindless_count) => { + let bindless_supported_syntax = quote! { + fn bindless_supported( + render_device: &#render_path::renderer::RenderDevice + ) -> bool { + render_device.features().contains( + #render_path::settings::WgpuFeatures::BUFFER_BINDING_ARRAY | + #render_path::settings::WgpuFeatures::TEXTURE_BINDING_ARRAY + ) && + render_device.limits().max_storage_buffers_per_shader_stage > 0 && + render_device.limits().max_samplers_per_shader_stage >= + (#sampler_binding_count * #bindless_count_syntax) + } }; - }, - ), - None => ( - TokenStream::new().into(), - quote! { let #actual_bindless_slot_count: Option<::core::num::NonZeroU32> = None; }, - ), - }; + let actual_bindless_slot_count_declaration = quote! { + let #actual_bindless_slot_count = if Self::bindless_supported(render_device) && + !force_no_bindless { + ::core::num::NonZeroU32::new(#bindless_count_syntax) + } else { + None + }; + }; + let bindless_slot_count_declaration = match bindless_count { + BindlessSlabResourceLimitAttr::Auto => { + quote! { + fn bindless_slot_count() -> Option< + #render_path::render_resource::BindlessSlabResourceLimit + > { + Some(#render_path::render_resource::BindlessSlabResourceLimit::Auto) + } + } + } + BindlessSlabResourceLimitAttr::Limit(lit) => { + quote! { + fn bindless_slot_count() -> Option< + #render_path::render_resource::BindlessSlabResourceLimit + > { + Some(#render_path::render_resource::BindlessSlabResourceLimit::Custom(#lit)) + } + } + } + }; + + let bindless_buffer_descriptor_count = bindless_buffer_descriptors.len(); + + // We use `LazyLock` so that we can call `min_size`, which isn't + // a `const fn`. + let bindless_descriptor_syntax = quote! 
{ + static RESOURCES: &[#render_path::render_resource::BindlessResourceType] = &[ + #(#bindless_resource_types),* + ]; + static BUFFERS: ::std::sync::LazyLock<[ + #render_path::render_resource::BindlessBufferDescriptor; + #bindless_buffer_descriptor_count + ]> = ::std::sync::LazyLock::new(|| { + [#(#bindless_buffer_descriptors),*] + }); + static INDEX_TABLES: &[ + #render_path::render_resource::BindlessIndexTableDescriptor + ] = &[ + #render_path::render_resource::BindlessIndexTableDescriptor { + indices: #bindless_index_table_range, + binding_number: #bindless_index_table_binding_number, + } + ]; + Some(#render_path::render_resource::BindlessDescriptor { + resources: ::std::borrow::Cow::Borrowed(RESOURCES), + buffers: ::std::borrow::Cow::Borrowed(&*BUFFERS), + index_tables: ::std::borrow::Cow::Borrowed(&*INDEX_TABLES), + }) + }; + + ( + quote! { + #bindless_slot_count_declaration + #bindless_supported_syntax + }, + actual_bindless_slot_count_declaration, + bindless_descriptor_syntax, + ) + } + None => ( + TokenStream::new().into(), + quote! { let #actual_bindless_slot_count: Option<::core::num::NonZeroU32> = None; }, + quote! { None }, + ), + }; Ok(TokenStream::from(quote! { #(#field_struct_impls)* @@ -654,12 +1079,57 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { #actual_bindless_slot_count_declaration #uniform_binding_type_declarations - vec![#(#binding_layouts,)*] + let mut #bind_group_layout_entries = Vec::new(); + match #actual_bindless_slot_count { + Some(bindless_slot_count) => { + let bindless_index_table_range = #bindless_index_table_range; + #bind_group_layout_entries.extend( + #render_path::render_resource::create_bindless_bind_group_layout_entries( + bindless_index_table_range.end.0 - + bindless_index_table_range.start.0, + bindless_slot_count.into(), + #bindless_index_table_binding_number, + ).into_iter() + ); + #(#bindless_binding_layouts)*; + } + None => { + #(#non_bindless_binding_layouts)*; + } + }; + #bind_group_layout_entries + } + + fn bindless_descriptor() -> Option<#render_path::render_resource::BindlessDescriptor> { + #bindless_descriptor_syntax } } })) } +/// Adds a bindless resource type to the `BindlessResourceType` array in the +/// bindless descriptor we're building up. +/// +/// See the `bevy_render::render_resource::bindless::BindlessResourceType` +/// documentation for more information. +fn add_bindless_resource_type( + render_path: &syn::Path, + bindless_resource_types: &mut Vec, + binding_index: u32, + bindless_resource_type: proc_macro2::TokenStream, +) { + // If we need to grow the array, pad the unused fields with + // `BindlessResourceType::None`. + if bindless_resource_types.len() < (binding_index as usize + 1) { + bindless_resource_types.resize_with(binding_index as usize + 1, || { + quote! { #render_path::render_resource::BindlessResourceType::None } + }); + } + + // Assign the `BindlessResourceType`. + bindless_resource_types[binding_index as usize] = bindless_resource_type; +} + fn get_fallback_image( render_path: &syn::Path, dimension: BindingTextureDimension, @@ -682,8 +1152,34 @@ fn get_fallback_image( /// like `#[uniform(LitInt, Ident)]` struct UniformBindingMeta { lit_int: LitInt, - _comma: Comma, ident: Ident, + binding_array: Option, +} + +/// The parsed structure-level `#[uniform]` or `#[data]` attribute. +/// +/// The corresponding syntax is `#[uniform(BINDING_INDEX, CONVERTED_SHADER_TYPE, +/// binding_array(BINDING_ARRAY)]`, optionally replacing `uniform` with `data`. 
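An aside before the `UniformBindingAttr` struct below: the `#[data]` form shares this grammar, but in bindless mode all instances are packed into one buffer instead of one buffer per instance. A syntax-only sketch with placeholder types; the `ShaderType`/`AsBindGroupShaderType` plumbing for `ExampleGpuData` is omitted.

```rust
use bevy_asset::Handle;
use bevy_image::Image;
use bevy_render::render_resource::AsBindGroup;

// Same grammar as the structure-level `#[uniform]`, but the generated code
// emits `OwnedBindingResource::Data`, so every instance's bytes are packed
// into one shared buffer.
#[derive(AsBindGroup)]
#[bindless]
#[data(0, ExampleGpuData, binding_array(10))]
struct PackedMaterial {
    // Field-level `#[uniform]` would be rejected here: only structure-level
    // uniform/data attributes are supported in bindless mode (see the check
    // in the field loop earlier in this file).
    #[texture(1)]
    #[sampler(2)]
    base_color_texture: Option<Handle<Image>>,
}
```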
+struct UniformBindingAttr { + /// Whether the declaration is `#[uniform]` or `#[data]`. + binding_type: UniformBindingAttrType, + /// The binding index. + binding_index: u32, + /// The uniform data type. + converted_shader_type: Ident, + /// The binding number of the binding array, if this is a bindless material. + binding_array: Option, +} + +/// Whether a structure-level shader type declaration is `#[uniform]` or +/// `#[data]`. +enum UniformBindingAttrType { + /// `#[uniform]`: i.e. in bindless mode, we need a separate buffer per data + /// instance. + Uniform, + /// `#[data]`: i.e. in bindless mode, we concatenate all instance data into + /// a single buffer. + Data, } /// Represents the arguments for any general binding attribute. @@ -725,22 +1221,62 @@ impl Parse for BindingIndexOptions { } impl Parse for UniformBindingMeta { + // Parse syntax like `#[uniform(0, StandardMaterial, binding_array(10))]`. fn parse(input: ParseStream) -> Result { + let lit_int = input.parse()?; + input.parse::()?; + let ident = input.parse()?; + + // Look for a `binding_array(BINDING_NUMBER)` declaration. + let mut binding_array: Option = None; + if input.parse::().is_ok() { + if input + .parse::()? + .get_ident() + .is_none_or(|ident| *ident != BINDING_ARRAY_MODIFIER_NAME) + { + return Err(Error::new_spanned(ident, "Expected `binding_array`")); + } + let parser; + parenthesized!(parser in input); + binding_array = Some(parser.parse()?); + } + Ok(Self { - lit_int: input.parse()?, - _comma: input.parse()?, - ident: input.parse()?, + lit_int, + ident, + binding_array, }) } } -fn get_uniform_binding_attr(attr: &syn::Attribute) -> Result<(u32, Ident)> { +/// Parses a structure-level `#[uniform]` attribute (not a field-level +/// `#[uniform]` attribute). +fn get_uniform_binding_attr(attr: &syn::Attribute) -> Result { + let attr_ident = attr + .path() + .get_ident() + .expect("Shouldn't be here if we didn't have an attribute"); + let uniform_binding_meta = attr.parse_args_with(UniformBindingMeta::parse)?; let binding_index = uniform_binding_meta.lit_int.base10_parse()?; let ident = uniform_binding_meta.ident; + let binding_array = match uniform_binding_meta.binding_array { + None => None, + Some(binding_array) => Some(binding_array.base10_parse()?), + }; - Ok((binding_index, ident)) + Ok(UniformBindingAttr { + binding_type: if attr_ident == UNIFORM_ATTRIBUTE_NAME { + UniformBindingAttrType::Uniform + } else { + UniformBindingAttrType::Data + }, + binding_index, + converted_shader_type: ident, + binding_array, + }) } fn get_binding_nested_attr(attr: &syn::Attribute) -> Result<(u32, Vec)> { @@ -874,6 +1410,14 @@ fn get_visibility_flag_value(meta_list: &MetaList) -> Result Result { + meta_list + .parse_args_with(|input: ParseStream| input.parse::())? + .base10_parse() +} + #[derive(Clone, Copy, Default)] enum BindingTextureDimension { D1, @@ -1214,6 +1758,7 @@ fn get_sampler_binding_type_value(lit_str: &LitStr) -> Result, read_only: bool, buffer: bool, } @@ -1223,6 +1768,7 @@ const BUFFER: Symbol = Symbol("buffer"); fn get_storage_binding_attr(metas: Vec) -> Result { let mut visibility = ShaderStageVisibility::vertex_fragment(); + let mut binding_array = None; let mut read_only = false; let mut buffer = false; @@ -1233,6 +1779,10 @@ fn get_storage_binding_attr(metas: Vec) -> Result { List(m) if m.path == VISIBILITY => { visibility = get_visibility_flag_value(&m)?; } + // Parse #[storage(0, binding_array(...))] for bindless mode. 
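Before the `binding_array` arm that follows, a syntax-only sketch of a field-level `#[storage]` declaration using the new modifier. The field type and bindings are placeholders; the numbers match the descriptor sketch earlier in this file.

```rust
use bevy_render::render_resource::AsBindGroup;

// Placeholder material: `values` is an ordinary data-backed storage field.
// In bindless mode, 3 is the bindless-index-table slot and the storage
// binding array itself sits at wgpu binding 11.
#[derive(AsBindGroup)]
#[bindless]
struct StorageMaterial {
    #[storage(3, read_only, binding_array(11))]
    values: Vec<f32>,
}
```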
+ List(m) if m.path == BINDING_ARRAY_MODIFIER_NAME => { + binding_array = Some(get_binding_array_flag_value(&m)?); + } Path(path) if path == READ_ONLY => { read_only = true; } @@ -1250,6 +1800,7 @@ fn get_storage_binding_attr(metas: Vec) -> Result { Ok(StorageAttrs { visibility, + binding_array, read_only, buffer, }) diff --git a/crates/bevy_render/macros/src/lib.rs b/crates/bevy_render/macros/src/lib.rs index 15b1add6b2..7a04932bcd 100644 --- a/crates/bevy_render/macros/src/lib.rs +++ b/crates/bevy_render/macros/src/lib.rs @@ -60,7 +60,8 @@ pub fn derive_extract_component(input: TokenStream) -> TokenStream { sampler, bind_group_data, storage, - bindless + bindless, + data ) )] pub fn derive_as_bind_group(input: TokenStream) -> TokenStream { diff --git a/crates/bevy_render/src/alpha.rs b/crates/bevy_render/src/alpha.rs index 12e1377eab..dd74881119 100644 --- a/crates/bevy_render/src/alpha.rs +++ b/crates/bevy_render/src/alpha.rs @@ -3,7 +3,7 @@ use bevy_reflect::{std_traits::ReflectDefault, Reflect}; // TODO: add discussion about performance. /// Sets how a material's base color alpha channel is used for transparency. #[derive(Debug, Default, Reflect, Copy, Clone, PartialEq)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum AlphaMode { /// Base color alpha values are overridden to be fully opaque (1.0). #[default] diff --git a/crates/bevy_render/src/batching/gpu_preprocessing.rs b/crates/bevy_render/src/batching/gpu_preprocessing.rs index ccf227fd74..7de55ee022 100644 --- a/crates/bevy_render/src/batching/gpu_preprocessing.rs +++ b/crates/bevy_render/src/batching/gpu_preprocessing.rs @@ -1,49 +1,50 @@ //! Batching functionality when GPU preprocessing is in use. -use core::any::TypeId; +use core::{any::TypeId, marker::PhantomData, mem}; use bevy_app::{App, Plugin}; +use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ prelude::Entity, query::{Has, With}, resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{Query, Res, ResMut, StaticSystemParam}, world::{FromWorld, World}, }; use bevy_encase_derive::ShaderType; use bevy_math::UVec4; -use bevy_platform_support::collections::{hash_map::Entry, HashMap, HashSet}; +use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; use bevy_utils::{default, TypeIdMap}; use bytemuck::{Pod, Zeroable}; +use encase::{internal::WriteInto, ShaderSize}; +use indexmap::IndexMap; use nonmax::NonMaxU32; -use tracing::error; +use tracing::{error, info}; use wgpu::{BindingResource, BufferUsages, DownlevelFlags, Features}; use crate::{ experimental::occlusion_culling::OcclusionCulling, render_phase::{ BinnedPhaseItem, BinnedRenderPhaseBatch, BinnedRenderPhaseBatchSet, - BinnedRenderPhaseBatchSets, CachedRenderPipelinePhaseItem, PhaseItemBatchSetKey as _, - PhaseItemExtraIndex, SortedPhaseItem, SortedRenderPhase, UnbatchableBinnedEntityIndices, - ViewBinnedRenderPhases, ViewSortedRenderPhases, + BinnedRenderPhaseBatchSets, CachedRenderPipelinePhaseItem, PhaseItem, + PhaseItemBatchSetKey as _, PhaseItemExtraIndex, RenderBin, SortedPhaseItem, + SortedRenderPhase, UnbatchableBinnedEntityIndices, ViewBinnedRenderPhases, + ViewSortedRenderPhases, }, - render_resource::{Buffer, BufferVec, GpuArrayBufferable, RawBufferVec, UninitBufferVec}, + render_resource::{Buffer, GpuArrayBufferable, RawBufferVec, UninitBufferVec}, renderer::{RenderAdapter, RenderDevice, RenderQueue}, + sync_world::MainEntity, view::{ExtractedView, NoIndirectDrawing, RetainedViewEntity}, - Render, RenderApp, RenderSet, + 
Render, RenderApp, RenderDebugFlags, RenderSet, }; use super::{BatchMeta, GetBatchData, GetFullBatchData}; #[derive(Default)] pub struct BatchingPlugin { - /// If true, this sets the `COPY_SRC` flag on indirect draw parameters so - /// that they can be read back to CPU. - /// - /// This is a debugging feature that may reduce performance. It primarily - /// exists for the `occlusion_culling` example. - pub allow_copies_from_indirect_parameters: bool, + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, } impl Plugin for BatchingPlugin { @@ -54,7 +55,8 @@ impl Plugin for BatchingPlugin { render_app .insert_resource(IndirectParametersBuffers::new( - self.allow_copies_from_indirect_parameters, + self.debug_flags + .contains(RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS), )) .add_systems( Render, @@ -109,6 +111,11 @@ impl GpuPreprocessingSupport { } } } + + /// Returns true if GPU culling is supported on this platform. + pub fn is_culling_supported(&self) -> bool { + self.max_supported_mode == GpuPreprocessingMode::Culling + } } /// The amount of GPU preprocessing (compute and indirect draw) that we do. @@ -147,18 +154,6 @@ where BD: GpuArrayBufferable + Sync + Send + 'static, BDI: Pod + Default, { - /// A storage area for the buffer data that the GPU compute shader is - /// expected to write to. - /// - /// There will be one entry for each index. - pub data_buffer: UninitBufferVec, - - /// The index of the buffer data in the current input buffer that - /// corresponds to each instance. - /// - /// This is keyed off each view. Each view has a separate buffer. - pub work_item_buffers: HashMap>, - /// The uniform data inputs for the current frame. /// /// These are uploaded during the extraction phase. @@ -173,6 +168,81 @@ where /// corresponding buffer data input uniform in this list. pub previous_input_buffer: InstanceInputUniformBuffer, + /// The data needed to render buffers for each phase. + /// + /// The keys of this map are the type IDs of each phase: e.g. `Opaque3d`, + /// `AlphaMask3d`, etc. + pub phase_instance_buffers: TypeIdMap>, +} + +impl Default for BatchedInstanceBuffers +where + BD: GpuArrayBufferable + Sync + Send + 'static, + BDI: Pod + Sync + Send + Default + 'static, +{ + fn default() -> Self { + BatchedInstanceBuffers { + current_input_buffer: InstanceInputUniformBuffer::new(), + previous_input_buffer: InstanceInputUniformBuffer::new(), + phase_instance_buffers: HashMap::default(), + } + } +} + +/// The GPU buffers holding the data needed to render batches for a single +/// phase. +/// +/// These are split out per phase so that we can run the phases in parallel. +/// This is the version of the structure that has a type parameter, which +/// enables Bevy's scheduler to run the batching operations for the different +/// phases in parallel. +/// +/// See the documentation for [`BatchedInstanceBuffers`] for more information. +#[derive(Resource)] +pub struct PhaseBatchedInstanceBuffers +where + PI: PhaseItem, + BD: GpuArrayBufferable + Sync + Send + 'static, +{ + /// The buffers for this phase. 
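A sketch of how a caller could reach the per-phase buffers, assuming the map is keyed by the phase item's `TypeId` as the field documentation here states; the import paths and generic bounds are copied from the surrounding definitions and may need adjusting.

```rust
use core::any::TypeId;

use bevy_render::batching::gpu_preprocessing::{
    BatchedInstanceBuffers, UntypedPhaseBatchedInstanceBuffers,
};
use bevy_render::render_phase::PhaseItem;
use bevy_render::render_resource::GpuArrayBufferable;
use bytemuck::Pod;

// Look up the buffers that were built for one specific phase (e.g. `Opaque3d`).
fn buffers_for_phase<PI, BD, BDI>(
    all: &BatchedInstanceBuffers<BD, BDI>,
) -> Option<&UntypedPhaseBatchedInstanceBuffers<BD>>
where
    PI: PhaseItem,
    BD: GpuArrayBufferable + Sync + Send + 'static,
    BDI: Pod + Sync + Send + Default + 'static,
{
    all.phase_instance_buffers.get(&TypeId::of::<PI>())
}
```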
+ pub buffers: UntypedPhaseBatchedInstanceBuffers, + phantom: PhantomData, +} + +impl Default for PhaseBatchedInstanceBuffers +where + PI: PhaseItem, + BD: GpuArrayBufferable + Sync + Send + 'static, +{ + fn default() -> Self { + PhaseBatchedInstanceBuffers { + buffers: UntypedPhaseBatchedInstanceBuffers::default(), + phantom: PhantomData, + } + } +} + +/// The GPU buffers holding the data needed to render batches for a single +/// phase, without a type parameter for that phase. +/// +/// Since this structure doesn't have a type parameter, it can be placed in +/// [`BatchedInstanceBuffers::phase_instance_buffers`]. +pub struct UntypedPhaseBatchedInstanceBuffers +where + BD: GpuArrayBufferable + Sync + Send + 'static, +{ + /// A storage area for the buffer data that the GPU compute shader is + /// expected to write to. + /// + /// There will be one entry for each index. + pub data_buffer: UninitBufferVec, + + /// The index of the buffer data in the current input buffer that + /// corresponds to each instance. + /// + /// This is keyed off each view. Each view has a separate buffer. + pub work_item_buffers: HashMap, + /// A buffer that holds the number of indexed meshes that weren't visible in /// the previous frame, when GPU occlusion culling is in use. /// @@ -327,7 +397,7 @@ pub enum PreprocessWorkItemBuffers { /// /// Because we don't have to separate indexed from non-indexed meshes in /// direct mode, we only have a single buffer here. - Direct(BufferVec), + Direct(RawBufferVec), /// The buffer of work items we use if we are using indirect drawing. /// @@ -336,9 +406,9 @@ pub enum PreprocessWorkItemBuffers { /// different sizes. Indirect { /// The buffer of work items corresponding to indexed meshes. - indexed: BufferVec, + indexed: RawBufferVec, /// The buffer of work items corresponding to non-indexed meshes. - non_indexed: BufferVec, + non_indexed: RawBufferVec, /// The work item buffers we use when GPU occlusion culling is in use. gpu_occlusion_culling: Option, }, @@ -351,11 +421,11 @@ pub struct GpuOcclusionCullingWorkItemBuffers { /// The buffer of work items corresponding to non-indexed meshes. pub late_non_indexed: UninitBufferVec, /// The offset into the - /// [`BatchedInstanceBuffers::late_indexed_indirect_parameters_buffer`] + /// [`UntypedPhaseBatchedInstanceBuffers::late_indexed_indirect_parameters_buffer`] /// where this view's indirect dispatch counts for indexed meshes live. pub late_indirect_parameters_indexed_offset: u32, /// The offset into the - /// [`BatchedInstanceBuffers::late_non_indexed_indirect_parameters_buffer`] + /// [`UntypedPhaseBatchedInstanceBuffers::late_non_indexed_indirect_parameters_buffer`] /// where this view's indirect dispatch counts for non-indexed meshes live. pub late_indirect_parameters_non_indexed_offset: u32, } @@ -409,7 +479,7 @@ impl Default for LatePreprocessWorkItemIndirectParameters { /// You may need to call this function if you're implementing your own custom /// render phases. See the `specialized_mesh_pipeline` example. 
pub fn get_or_create_work_item_buffer<'a, I>( - work_item_buffers: &'a mut HashMap>, + work_item_buffers: &'a mut HashMap, view: RetainedViewEntity, no_indirect_drawing: bool, enable_gpu_occlusion_culling: bool, @@ -417,21 +487,17 @@ pub fn get_or_create_work_item_buffer<'a, I>( where I: 'static, { - let preprocess_work_item_buffers = match work_item_buffers - .entry(view) - .or_default() - .entry(TypeId::of::()) - { + let preprocess_work_item_buffers = match work_item_buffers.entry(view) { Entry::Occupied(occupied_entry) => occupied_entry.into_mut(), Entry::Vacant(vacant_entry) => { if no_indirect_drawing { - vacant_entry.insert(PreprocessWorkItemBuffers::Direct(BufferVec::new( + vacant_entry.insert(PreprocessWorkItemBuffers::Direct(RawBufferVec::new( BufferUsages::STORAGE, ))) } else { vacant_entry.insert(PreprocessWorkItemBuffers::Indirect { - indexed: BufferVec::new(BufferUsages::STORAGE), - non_indexed: BufferVec::new(BufferUsages::STORAGE), + indexed: RawBufferVec::new(BufferUsages::STORAGE), + non_indexed: RawBufferVec::new(BufferUsages::STORAGE), // We fill this in below if `enable_gpu_occlusion_culling` // is set. gpu_occlusion_culling: None, @@ -564,14 +630,15 @@ pub struct PreprocessWorkItem { /// The index of the batch input data in the input buffer that the shader /// reads from. pub input_index: u32, - /// The index of the `MeshUniform` in the output buffer that we write to. - /// In direct mode, this is the index of the uniform. In indirect mode, this - /// is the first index uniform in the batch set. - pub output_index: u32, - /// The index of the [`IndirectParametersMetadata`] in the + + /// In direct mode, the index of the mesh uniform; in indirect mode, the + /// index of the [`IndirectParametersGpuMetadata`]. + /// + /// In indirect mode, this is the index of the + /// [`IndirectParametersGpuMetadata`] in the /// `IndirectParametersBuffers::indexed_metadata` or /// `IndirectParametersBuffers::non_indexed_metadata`. - pub indirect_parameters_index: u32, + pub output_or_indirect_parameters_index: u32, } /// The `wgpu` indirect parameters structure that specifies a GPU draw command. @@ -610,23 +677,13 @@ pub struct IndirectParametersNonIndexed { pub first_instance: u32, } -/// A structure, shared between CPU and GPU, that records how many instances of -/// each mesh are actually to be drawn. -/// -/// The CPU writes to this structure in order to initialize the fields other -/// than [`Self::early_instance_count`] and [`Self::late_instance_count`]. The -/// GPU mesh preprocessing shader increments the [`Self::early_instance_count`] -/// and [`Self::late_instance_count`] as it determines that meshes are visible. -/// The indirect parameter building shader reads this metadata in order to -/// construct the indirect draw parameters. +/// A structure, initialized on CPU and read on GPU, that contains metadata +/// about each batch. /// /// Each batch will have one instance of this structure. #[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] #[repr(C)] -pub struct IndirectParametersMetadata { - /// The index of the mesh in the array of `MeshInputUniform`s. - pub mesh_index: u32, - +pub struct IndirectParametersCpuMetadata { /// The index of the first instance of this mesh in the array of /// `MeshUniform`s. /// @@ -641,9 +698,26 @@ pub struct IndirectParametersMetadata { /// /// A *batch set* is a set of meshes that may be multi-drawn together. 
/// Multiple batches (and therefore multiple instances of - /// [`IndirectParametersMetadata`] structures) can be part of the same batch - /// set. + /// [`IndirectParametersGpuMetadata`] structures) can be part of the same + /// batch set. pub batch_set_index: u32, +} + +/// A structure, written and read GPU, that records how many instances of each +/// mesh are actually to be drawn. +/// +/// The GPU mesh preprocessing shader increments the +/// [`Self::early_instance_count`] and [`Self::late_instance_count`] as it +/// determines that meshes are visible. The indirect parameter building shader +/// reads this metadata in order to construct the indirect draw parameters. +/// +/// Each batch will have one instance of this structure. +#[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] +#[repr(C)] +pub struct IndirectParametersGpuMetadata { + /// The index of the first mesh in this batch in the array of + /// `MeshInputUniform`s. + pub mesh_index: u32, /// The number of instances that were judged visible last frame. /// @@ -695,236 +769,137 @@ pub struct IndirectBatchSet { /// (`multi_draw_indirect`, `multi_draw_indirect_count`) use to draw the scene. /// /// In addition to the indirect draw buffers themselves, this structure contains -/// the buffers that store [`IndirectParametersMetadata`], which are the +/// the buffers that store [`IndirectParametersGpuMetadata`], which are the /// structures that culling writes to so that the indirect parameter building /// pass can determine how many meshes are actually to be drawn. /// /// These buffers will remain empty if indirect drawing isn't in use. -#[derive(Resource)] +#[derive(Resource, Deref, DerefMut)] pub struct IndirectParametersBuffers { - /// The GPU buffer that stores the indirect draw parameters for non-indexed - /// meshes. + /// A mapping from a phase type ID to the indirect parameters buffers for + /// that phase. /// - /// The indirect parameters building shader writes to this buffer, while the - /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from - /// it to perform the draws. - non_indexed_data: UninitBufferVec, - - /// The GPU buffer that holds the data used to construct indirect draw - /// parameters for non-indexed meshes. + /// Examples of phase type IDs are `Opaque3d` and `AlphaMask3d`. + #[deref] + pub buffers: TypeIdMap, + /// If true, this sets the `COPY_SRC` flag on indirect draw parameters so + /// that they can be read back to CPU. /// - /// The GPU mesh preprocessing shader writes to this buffer, and the - /// indirect parameters building shader reads this buffer to construct the - /// indirect draw parameters. - non_indexed_metadata: RawBufferVec, - - /// The GPU buffer that holds the number of indirect draw commands for each - /// phase of each view, for non-indexed meshes. - /// - /// The indirect parameters building shader writes to this buffer, and the - /// `multi_draw_indirect_count` command reads from it in order to know how - /// many indirect draw commands to process. - non_indexed_batch_sets: RawBufferVec, - - /// The GPU buffer that stores the indirect draw parameters for indexed - /// meshes. - /// - /// The indirect parameters building shader writes to this buffer, while the - /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from - /// it to perform the draws. - indexed_data: UninitBufferVec, - - /// The GPU buffer that holds the data used to construct indirect draw - /// parameters for indexed meshes. 
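// A sketch of the CPU/GPU split between `IndirectParametersCpuMetadata` and
// `IndirectParametersGpuMetadata` described above. `cpu_metadata` and
// `gpu_metadata` are hypothetical stand-ins for the buffers defined later in
// this file: the CPU fills in only the CPU half, while the matching GPU slot
// is merely reserved and is written by the mesh preprocessing shader.
cpu_metadata.push(IndirectParametersCpuMetadata {
    base_output_index: first_output_index,
    batch_set_index,
});
// Reserve space only; `mesh_index` and the early/late instance counts are
// produced on the GPU.
gpu_metadata.add();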
- /// - /// The GPU mesh preprocessing shader writes to this buffer, and the - /// indirect parameters building shader reads this buffer to construct the - /// indirect draw parameters. - indexed_metadata: RawBufferVec, - - /// The GPU buffer that holds the number of indirect draw commands for each - /// phase of each view, for indexed meshes. - /// - /// The indirect parameters building shader writes to this buffer, and the - /// `multi_draw_indirect_count` command reads from it in order to know how - /// many indirect draw commands to process. - indexed_batch_sets: RawBufferVec, + /// This is a debugging feature that may reduce performance. It primarily + /// exists for the `occlusion_culling` example. + pub allow_copies_from_indirect_parameter_buffers: bool, } impl IndirectParametersBuffers { - /// Creates the indirect parameters buffers. + /// Initializes a new [`IndirectParametersBuffers`] resource. pub fn new(allow_copies_from_indirect_parameter_buffers: bool) -> IndirectParametersBuffers { + IndirectParametersBuffers { + buffers: TypeIdMap::default(), + allow_copies_from_indirect_parameter_buffers, + } + } +} + +/// The buffers containing all the information that indirect draw commands use +/// to draw the scene, for a single phase. +/// +/// This is the version of the structure that has a type parameter, so that the +/// batching for different phases can run in parallel. +/// +/// See the [`IndirectParametersBuffers`] documentation for more information. +#[derive(Resource)] +pub struct PhaseIndirectParametersBuffers +where + PI: PhaseItem, +{ + /// The indirect draw buffers for the phase. + pub buffers: UntypedPhaseIndirectParametersBuffers, + phantom: PhantomData, +} + +impl PhaseIndirectParametersBuffers +where + PI: PhaseItem, +{ + pub fn new(allow_copies_from_indirect_parameter_buffers: bool) -> Self { + PhaseIndirectParametersBuffers { + buffers: UntypedPhaseIndirectParametersBuffers::new( + allow_copies_from_indirect_parameter_buffers, + ), + phantom: PhantomData, + } + } +} + +/// The buffers containing all the information that indirect draw commands use +/// to draw the scene, for a single phase. +/// +/// This is the version of the structure that doesn't have a type parameter, so +/// that it can be inserted into [`IndirectParametersBuffers::buffers`] +/// +/// See the [`IndirectParametersBuffers`] documentation for more information. +pub struct UntypedPhaseIndirectParametersBuffers { + /// Information that indirect draw commands use to draw indexed meshes in + /// the scene. + pub indexed: MeshClassIndirectParametersBuffers, + /// Information that indirect draw commands use to draw non-indexed meshes + /// in the scene. + pub non_indexed: MeshClassIndirectParametersBuffers, +} + +impl UntypedPhaseIndirectParametersBuffers { + /// Creates the indirect parameters buffers. 
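// A rough sketch of how a consumer might fetch one phase's buffers from the
// per-phase table described above. `Opaque3d` is just an illustrative phase
// item type; whatever `TypeId` was used when the buffers were collected is the
// key to use here.
fn indexed_draw_buffer_for_phase(
    indirect_parameters_buffers: &IndirectParametersBuffers,
) -> Option<&Buffer> {
    // `IndirectParametersBuffers` derefs to its `TypeIdMap`, keyed by the
    // phase item type (e.g. `Opaque3d`, `AlphaMask3d`).
    indirect_parameters_buffers
        .get(&TypeId::of::<Opaque3d>())?
        .indexed
        .data_buffer()
}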
+ pub fn new( + allow_copies_from_indirect_parameter_buffers: bool, + ) -> UntypedPhaseIndirectParametersBuffers { let mut indirect_parameter_buffer_usages = BufferUsages::STORAGE | BufferUsages::INDIRECT; if allow_copies_from_indirect_parameter_buffers { indirect_parameter_buffer_usages |= BufferUsages::COPY_SRC; } - IndirectParametersBuffers { - non_indexed_data: UninitBufferVec::new(indirect_parameter_buffer_usages), - non_indexed_metadata: RawBufferVec::new(BufferUsages::STORAGE), - non_indexed_batch_sets: RawBufferVec::new(indirect_parameter_buffer_usages), - indexed_data: UninitBufferVec::new(indirect_parameter_buffer_usages), - indexed_metadata: RawBufferVec::new(BufferUsages::STORAGE), - indexed_batch_sets: RawBufferVec::new(indirect_parameter_buffer_usages), + UntypedPhaseIndirectParametersBuffers { + non_indexed: MeshClassIndirectParametersBuffers::new( + allow_copies_from_indirect_parameter_buffers, + ), + indexed: MeshClassIndirectParametersBuffers::new( + allow_copies_from_indirect_parameter_buffers, + ), } } - /// Returns the GPU buffer that stores the indirect draw parameters for - /// indexed meshes. - /// - /// The indirect parameters building shader writes to this buffer, while the - /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from - /// it to perform the draws. - #[inline] - pub fn indexed_data_buffer(&self) -> Option<&Buffer> { - self.indexed_data.buffer() - } - - /// Returns the GPU buffer that holds the data used to construct indirect - /// draw parameters for indexed meshes. - /// - /// The GPU mesh preprocessing shader writes to this buffer, and the - /// indirect parameters building shader reads this buffer to construct the - /// indirect draw parameters. - #[inline] - pub fn indexed_metadata_buffer(&self) -> Option<&Buffer> { - self.indexed_metadata.buffer() - } - - /// Returns the GPU buffer that holds the number of indirect draw commands - /// for each phase of each view, for indexed meshes. - /// - /// The indirect parameters building shader writes to this buffer, and the - /// `multi_draw_indirect_count` command reads from it in order to know how - /// many indirect draw commands to process. - #[inline] - pub fn indexed_batch_sets_buffer(&self) -> Option<&Buffer> { - self.indexed_batch_sets.buffer() - } - - /// Returns the GPU buffer that stores the indirect draw parameters for - /// non-indexed meshes. - /// - /// The indirect parameters building shader writes to this buffer, while the - /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from - /// it to perform the draws. - #[inline] - pub fn non_indexed_data_buffer(&self) -> Option<&Buffer> { - self.non_indexed_data.buffer() - } - - /// Returns the GPU buffer that holds the data used to construct indirect - /// draw parameters for non-indexed meshes. - /// - /// The GPU mesh preprocessing shader writes to this buffer, and the - /// indirect parameters building shader reads this buffer to construct the - /// indirect draw parameters. - #[inline] - pub fn non_indexed_metadata_buffer(&self) -> Option<&Buffer> { - self.non_indexed_metadata.buffer() - } - - /// Returns the GPU buffer that holds the number of indirect draw commands - /// for each phase of each view, for non-indexed meshes. - /// - /// The indirect parameters building shader writes to this buffer, and the - /// `multi_draw_indirect_count` command reads from it in order to know how - /// many indirect draw commands to process. 
- #[inline] - pub fn non_indexed_batch_sets_buffer(&self) -> Option<&Buffer> { - self.non_indexed_batch_sets.buffer() - } - - /// Reserves space for `count` new batches corresponding to indexed meshes. - /// - /// This allocates in both the [`Self::indexed_metadata`] and - /// [`Self::indexed_data`] buffers. - fn allocate_indexed(&mut self, count: u32) -> u32 { - let length = self.indexed_data.len(); - self.indexed_metadata.reserve_internal(count as usize); - for _ in 0..count { - self.indexed_data.add(); - self.indexed_metadata - .push(IndirectParametersMetadata::default()); - } - length as u32 - } - - /// Reserves space for `count` new batches corresponding to non-indexed - /// meshes. - /// - /// This allocates in both the `non_indexed_metadata` and `non_indexed_data` - /// buffers. - pub fn allocate_non_indexed(&mut self, count: u32) -> u32 { - let length = self.non_indexed_data.len(); - self.non_indexed_metadata.reserve_internal(count as usize); - for _ in 0..count { - self.non_indexed_data.add(); - self.non_indexed_metadata - .push(IndirectParametersMetadata::default()); - } - length as u32 - } - /// Reserves space for `count` new batches. /// /// The `indexed` parameter specifies whether the meshes that these batches /// correspond to are indexed or not. pub fn allocate(&mut self, indexed: bool, count: u32) -> u32 { if indexed { - self.allocate_indexed(count) + self.indexed.allocate(count) } else { - self.allocate_non_indexed(count) + self.non_indexed.allocate(count) } } - /// Initializes the batch corresponding to an indexed mesh at the given - /// index with the given [`IndirectParametersMetadata`]. - pub fn set_indexed(&mut self, index: u32, value: IndirectParametersMetadata) { - self.indexed_metadata.set(index, value); - } - - /// Initializes the batch corresponding to a non-indexed mesh at the given - /// index with the given [`IndirectParametersMetadata`]. - pub fn set_non_indexed(&mut self, index: u32, value: IndirectParametersMetadata) { - self.non_indexed_metadata.set(index, value); - } - /// Returns the number of batches currently allocated. /// /// The `indexed` parameter specifies whether the meshes that these batches /// correspond to are indexed or not. fn batch_count(&self, indexed: bool) -> usize { if indexed { - self.indexed_batch_count() + self.indexed.batch_count() } else { - self.non_indexed_batch_count() + self.non_indexed.batch_count() } } - /// Returns the number of batches corresponding to indexed meshes that are - /// currently allocated. - #[inline] - pub fn indexed_batch_count(&self) -> usize { - self.indexed_data.len() - } - - /// Returns the number of batches corresponding to non-indexed meshes that - /// are currently allocated. - #[inline] - pub fn non_indexed_batch_count(&self) -> usize { - self.non_indexed_data.len() - } - /// Returns the number of batch sets currently allocated. /// /// The `indexed` parameter specifies whether the meshes that these batch /// sets correspond to are indexed or not. pub fn batch_set_count(&self, indexed: bool) -> usize { if indexed { - self.indexed_batch_sets.len() + self.indexed.batch_sets.len() } else { - self.non_indexed_batch_sets.len() + self.non_indexed.batch_sets.len() } } @@ -935,23 +910,174 @@ impl IndirectParametersBuffers { /// to are indexed or not. `indirect_parameters_base` specifies the offset /// within `Self::indexed_data` or `Self::non_indexed_data` of the first /// batch in this batch set. 
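// A sketch of the allocation flow, assuming a single-batch batch set of
// indexed meshes; `buffers` is a hypothetical
// `&mut UntypedPhaseIndirectParametersBuffers`, and `first_output_index` and
// `batch_set_index` are illustrative values.
let indirect_parameters_index = buffers.allocate(/*indexed=*/ true, 1);
buffers.indexed.set(
    indirect_parameters_index,
    IndirectParametersCpuMetadata {
        base_output_index: first_output_index,
        batch_set_index,
    },
);
// Register the batch set starting at the slot just allocated; the indirect
// parameters building shader later fills in its `indirect_parameters_count`.
buffers.add_batch_set(/*indexed=*/ true, indirect_parameters_index);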
+ #[inline] pub fn add_batch_set(&mut self, indexed: bool, indirect_parameters_base: u32) { if indexed { - self.indexed_batch_sets.push(IndirectBatchSet { + self.indexed.batch_sets.push(IndirectBatchSet { indirect_parameters_base, indirect_parameters_count: 0, }); } else { - self.non_indexed_batch_sets.push(IndirectBatchSet { + self.non_indexed.batch_sets.push(IndirectBatchSet { indirect_parameters_base, indirect_parameters_count: 0, }); } } + /// Returns the index that a newly-added batch set will have. + /// + /// The `indexed` parameter specifies whether the meshes in such a batch set + /// are indexed or not. pub fn get_next_batch_set_index(&self, indexed: bool) -> Option { NonMaxU32::new(self.batch_set_count(indexed) as u32) } + + /// Clears out the buffers in preparation for a new frame. + pub fn clear(&mut self) { + self.indexed.clear(); + self.non_indexed.clear(); + } +} + +/// The buffers containing all the information that indirect draw commands use +/// to draw the scene, for a single mesh class (indexed or non-indexed), for a +/// single phase. +pub struct MeshClassIndirectParametersBuffers +where + IP: Clone + ShaderSize + WriteInto, +{ + /// The GPU buffer that stores the indirect draw parameters for the meshes. + /// + /// The indirect parameters building shader writes to this buffer, while the + /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from + /// it to perform the draws. + data: UninitBufferVec, + + /// The GPU buffer that holds the data used to construct indirect draw + /// parameters for meshes. + /// + /// The GPU mesh preprocessing shader writes to this buffer, and the + /// indirect parameters building shader reads this buffer to construct the + /// indirect draw parameters. + cpu_metadata: RawBufferVec, + + /// The GPU buffer that holds data built by the GPU used to construct + /// indirect draw parameters for meshes. + /// + /// The GPU mesh preprocessing shader writes to this buffer, and the + /// indirect parameters building shader reads this buffer to construct the + /// indirect draw parameters. + gpu_metadata: UninitBufferVec, + + /// The GPU buffer that holds the number of indirect draw commands for each + /// phase of each view, for meshes. + /// + /// The indirect parameters building shader writes to this buffer, and the + /// `multi_draw_indirect_count` command reads from it in order to know how + /// many indirect draw commands to process. + batch_sets: RawBufferVec, +} + +impl MeshClassIndirectParametersBuffers +where + IP: Clone + ShaderSize + WriteInto, +{ + fn new( + allow_copies_from_indirect_parameter_buffers: bool, + ) -> MeshClassIndirectParametersBuffers { + let mut indirect_parameter_buffer_usages = BufferUsages::STORAGE | BufferUsages::INDIRECT; + if allow_copies_from_indirect_parameter_buffers { + indirect_parameter_buffer_usages |= BufferUsages::COPY_SRC; + } + + MeshClassIndirectParametersBuffers { + data: UninitBufferVec::new(indirect_parameter_buffer_usages), + cpu_metadata: RawBufferVec::new(BufferUsages::STORAGE), + gpu_metadata: UninitBufferVec::new(BufferUsages::STORAGE), + batch_sets: RawBufferVec::new(indirect_parameter_buffer_usages), + } + } + + /// Returns the GPU buffer that stores the indirect draw parameters for + /// indexed meshes. + /// + /// The indirect parameters building shader writes to this buffer, while the + /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from + /// it to perform the draws. 
+ #[inline] + pub fn data_buffer(&self) -> Option<&Buffer> { + self.data.buffer() + } + + /// Returns the GPU buffer that holds the CPU-constructed data used to + /// construct indirect draw parameters for meshes. + /// + /// The CPU writes to this buffer, and the indirect parameters building + /// shader reads this buffer to construct the indirect draw parameters. + #[inline] + pub fn cpu_metadata_buffer(&self) -> Option<&Buffer> { + self.cpu_metadata.buffer() + } + + /// Returns the GPU buffer that holds the GPU-constructed data used to + /// construct indirect draw parameters for meshes. + /// + /// The GPU mesh preprocessing shader writes to this buffer, and the + /// indirect parameters building shader reads this buffer to construct the + /// indirect draw parameters. + #[inline] + pub fn gpu_metadata_buffer(&self) -> Option<&Buffer> { + self.gpu_metadata.buffer() + } + + /// Returns the GPU buffer that holds the number of indirect draw commands + /// for each phase of each view. + /// + /// The indirect parameters building shader writes to this buffer, and the + /// `multi_draw_indirect_count` command reads from it in order to know how + /// many indirect draw commands to process. + #[inline] + pub fn batch_sets_buffer(&self) -> Option<&Buffer> { + self.batch_sets.buffer() + } + + /// Reserves space for `count` new batches. + /// + /// This allocates in the [`Self::cpu_metadata`], [`Self::gpu_metadata`], + /// and [`Self::data`] buffers. + fn allocate(&mut self, count: u32) -> u32 { + let length = self.data.len(); + self.cpu_metadata.reserve_internal(count as usize); + self.gpu_metadata.add_multiple(count as usize); + for _ in 0..count { + self.data.add(); + self.cpu_metadata + .push(IndirectParametersCpuMetadata::default()); + } + length as u32 + } + + /// Sets the [`IndirectParametersCpuMetadata`] for the mesh at the given + /// index. + pub fn set(&mut self, index: u32, value: IndirectParametersCpuMetadata) { + self.cpu_metadata.set(index, value); + } + + /// Returns the number of batches corresponding to meshes that are currently + /// allocated. + #[inline] + pub fn batch_count(&self) -> usize { + self.data.len() + } + + /// Clears out all the buffers in preparation for a new frame. + pub fn clear(&mut self) { + self.data.clear(); + self.cpu_metadata.clear(); + self.gpu_metadata.clear(); + self.batch_sets.clear(); + } } impl Default for IndirectParametersBuffers { @@ -967,32 +1093,46 @@ impl FromWorld for GpuPreprocessingSupport { let adapter = world.resource::(); let device = world.resource::(); - // Filter some Qualcomm devices on Android as they crash when using GPU - // preprocessing. - // We filter out Adreno 730 and earlier GPUs (except 720, as it's newer - // than 730). + // Filter Android drivers that are incompatible with GPU preprocessing: + // - We filter out Adreno 730 and earlier GPUs (except 720, as it's newer + // than 730). + // - We filter out Mali GPUs with driver versions lower than 48. 
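// A sketch of how a custom render node might consume the
// `MeshClassIndirectParametersBuffers` accessors above before encoding an
// indirect draw; each accessor returns `None` until the buffers have actually
// been uploaded for the frame, so bail out in that case. `phase_buffers` is a
// hypothetical `&UntypedPhaseIndirectParametersBuffers`.
let (Some(draw_buffer), Some(count_buffer)) = (
    phase_buffers.indexed.data_buffer(),
    phase_buffers.indexed.batch_sets_buffer(),
) else {
    return;
};
// `draw_buffer` supplies the indirect draw commands and `count_buffer` the
// per-batch-set draw counts for a `multi_draw_indexed_indirect_count` call.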
fn is_non_supported_android_device(adapter: &RenderAdapter) -> bool { crate::get_adreno_model(adapter).is_some_and(|model| model != 720 && model <= 730) + || crate::get_mali_driver_version(adapter).is_some_and(|version| version < 48) } - let feature_support = device.features().contains( + let culling_feature_support = device.features().contains( Features::INDIRECT_FIRST_INSTANCE | Features::MULTI_DRAW_INDIRECT | Features::PUSH_CONSTANTS, ); // Depth downsampling for occlusion culling requires 12 textures - let limit_support = device.limits().max_storage_textures_per_shader_stage >= 12; + let limit_support = device.limits().max_storage_textures_per_shader_stage >= 12 && + // Even if the adapter supports compute, we might be simulating a lack of + // compute via device limits (see `WgpuSettingsPriority::WebGL2` and + // `wgpu::Limits::downlevel_webgl2_defaults()`). This will have set all the + // `max_compute_*` limits to zero, so we arbitrarily pick one as a canary. + device.limits().max_compute_workgroup_storage_size != 0; + let downlevel_support = adapter.get_downlevel_capabilities().flags.contains( + DownlevelFlags::COMPUTE_SHADERS | DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW ); let max_supported_mode = if device.limits().max_compute_workgroup_size_x == 0 || is_non_supported_android_device(adapter) { + info!( + "GPU preprocessing is not supported on this device. \ + Falling back to CPU preprocessing.", + ); GpuPreprocessingMode::None - } else if !(feature_support && limit_support && downlevel_support) { + } else if !(culling_feature_support && limit_support && downlevel_support) { + info!("Some GPU preprocessing are limited on this device."); GpuPreprocessingMode::PreprocessingOnly } else { + info!("GPU preprocessing is fully supported on this device."); GpuPreprocessingMode::Culling }; @@ -1007,11 +1147,25 @@ where { /// Creates new buffers. pub fn new() -> Self { - BatchedInstanceBuffers { + Self::default() + } + + /// Clears out the buffers in preparation for a new frame. + pub fn clear(&mut self) { + for phase_instance_buffer in self.phase_instance_buffers.values_mut() { + phase_instance_buffer.clear(); + } + } +} + +impl UntypedPhaseBatchedInstanceBuffers +where + BD: GpuArrayBufferable + Sync + Send + 'static, +{ + pub fn new() -> Self { + UntypedPhaseBatchedInstanceBuffers { data_buffer: UninitBufferVec::new(BufferUsages::STORAGE), work_item_buffers: HashMap::default(), - current_input_buffer: InstanceInputUniformBuffer::new(), - previous_input_buffer: InstanceInputUniformBuffer::new(), late_indexed_indirect_parameters_buffer: RawBufferVec::new( BufferUsages::STORAGE | BufferUsages::INDIRECT, ), @@ -1039,17 +1193,14 @@ where // Clear each individual set of buffers, but don't depopulate the hash // table. We want to avoid reallocating these vectors every frame. 
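// To restate the `GpuPreprocessingMode` selection above as a standalone table
// (a hypothetical helper that mirrors the `if`/`else if`/`else` chain; it is
// not additional engine API):
fn choose_gpu_preprocessing_mode(
    compute_unsupported: bool,
    culling_feature_support: bool,
    limit_support: bool,
    downlevel_support: bool,
) -> GpuPreprocessingMode {
    if compute_unsupported {
        // No compute shaders, or a known-problematic Android driver:
        // everything falls back to the CPU.
        GpuPreprocessingMode::None
    } else if !(culling_feature_support && limit_support && downlevel_support) {
        // Compute works, but the indirect-draw/culling requirements are
        // missing, so preprocessing runs without GPU culling.
        GpuPreprocessingMode::PreprocessingOnly
    } else {
        GpuPreprocessingMode::Culling
    }
}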
for view_work_item_buffers in self.work_item_buffers.values_mut() { - for phase_work_item_buffers in view_work_item_buffers.values_mut() { - phase_work_item_buffers.clear(); - } + view_work_item_buffers.clear(); } } } -impl Default for BatchedInstanceBuffers +impl Default for UntypedPhaseBatchedInstanceBuffers where BD: GpuArrayBufferable + Sync + Send + 'static, - BDI: Pod + Default + Sync + Send + 'static, { fn default() -> Self { Self::new() @@ -1098,7 +1249,7 @@ where self, instance_end_index: u32, phase: &mut SortedRenderPhase, - indirect_parameters_buffers: &mut IndirectParametersBuffers, + phase_indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers, ) where I: CachedRenderPipelinePhaseItem + SortedPhaseItem, { @@ -1114,7 +1265,7 @@ where None => PhaseItemExtraIndex::None, }; if let Some(indirect_parameters_index) = self.indirect_parameters_index { - indirect_parameters_buffers + phase_indirect_parameters_buffers .add_batch_set(self.indexed, indirect_parameters_index.into()); } } @@ -1133,6 +1284,8 @@ pub fn clear_batched_gpu_instance_buffers( ) where GFBD: GetFullBatchData, { + // Don't clear the entire table, because that would delete the buffers, and + // we want to reuse those allocations. if let Some(mut gpu_batched_instance_buffers) = gpu_batched_instance_buffers { gpu_batched_instance_buffers.clear(); } @@ -1156,17 +1309,24 @@ pub fn delete_old_work_item_buffers( .iter() .map(|extracted_view| extracted_view.retained_view_entity) .collect(); - gpu_batched_instance_buffers - .work_item_buffers - .retain(|retained_view_entity, _| retained_view_entities.contains(retained_view_entity)); + for phase_instance_buffers in gpu_batched_instance_buffers + .phase_instance_buffers + .values_mut() + { + phase_instance_buffers + .work_item_buffers + .retain(|retained_view_entity, _| { + retained_view_entities.contains(retained_view_entity) + }); + } } /// Batch the items in a sorted render phase, when GPU instance buffer building /// is in use. This means comparing metadata needed to draw each phase item and /// trying to combine the draws into a batch. pub fn batch_and_prepare_sorted_render_phase( - gpu_array_buffer: ResMut>, - mut indirect_parameters_buffers: ResMut, + mut phase_batched_instance_buffers: ResMut>, + mut phase_indirect_parameters_buffers: ResMut>, mut sorted_render_phases: ResMut>, mut views: Query<( &ExtractedView, @@ -1179,13 +1339,12 @@ pub fn batch_and_prepare_sorted_render_phase( GFBD: GetFullBatchData, { // We only process GPU-built batch data in this function. - let BatchedInstanceBuffers { + let UntypedPhaseBatchedInstanceBuffers { ref mut data_buffer, ref mut work_item_buffers, ref mut late_indexed_indirect_parameters_buffer, ref mut late_non_indexed_indirect_parameters_buffer, - .. - } = gpu_array_buffer.into_inner(); + } = phase_batched_instance_buffers.buffers; for (extracted_view, no_indirect_drawing, gpu_occlusion_culling) in &mut views { let Some(phase) = sorted_render_phases.get_mut(&extracted_view.retained_view_entity) else { @@ -1210,8 +1369,6 @@ pub fn batch_and_prepare_sorted_render_phase( // Walk through the list of phase items, building up batches as we go. let mut batch: Option> = None; - let mut first_output_index = data_buffer.len() as u32; - for current_index in 0..phase.items.len() { // Get the index of the input data, and comparison metadata, for // this entity. 
@@ -1231,7 +1388,7 @@ pub fn batch_and_prepare_sorted_render_phase( batch.flush( data_buffer.len() as u32, phase, - &mut indirect_parameters_buffers, + &mut phase_indirect_parameters_buffers.buffers, ); } @@ -1257,25 +1414,38 @@ pub fn batch_and_prepare_sorted_render_phase( if !can_batch { // Break a batch if we need to. if let Some(batch) = batch.take() { - batch.flush(output_index, phase, &mut indirect_parameters_buffers); + batch.flush( + output_index, + phase, + &mut phase_indirect_parameters_buffers.buffers, + ); } let indirect_parameters_index = if no_indirect_drawing { None } else if item_is_indexed { - Some(indirect_parameters_buffers.allocate_indexed(1)) + Some( + phase_indirect_parameters_buffers + .buffers + .indexed + .allocate(1), + ) } else { - Some(indirect_parameters_buffers.allocate_non_indexed(1)) + Some( + phase_indirect_parameters_buffers + .buffers + .non_indexed + .allocate(1), + ) }; // Start a new batch. if let Some(indirect_parameters_index) = indirect_parameters_index { GFBD::write_batch_indirect_parameters_metadata( - current_input_index.into(), item_is_indexed, output_index, None, - &mut indirect_parameters_buffers, + &mut phase_indirect_parameters_buffers.buffers, indirect_parameters_index, ); }; @@ -1287,8 +1457,6 @@ pub fn batch_and_prepare_sorted_render_phase( indirect_parameters_index: indirect_parameters_index.and_then(NonMaxU32::new), meta: current_meta, }); - - first_output_index = output_index; } // Add a new preprocessing work item so that the preprocessing @@ -1298,14 +1466,15 @@ pub fn batch_and_prepare_sorted_render_phase( item_is_indexed, PreprocessWorkItem { input_index: current_input_index.into(), - output_index: if no_indirect_drawing { - output_index - } else { - first_output_index - }, - indirect_parameters_index: match batch.indirect_parameters_index { - Some(indirect_parameters_index) => indirect_parameters_index.into(), - None => 0, + output_or_indirect_parameters_index: match ( + no_indirect_drawing, + batch.indirect_parameters_index, + ) { + (true, _) => output_index, + (false, Some(indirect_parameters_index)) => { + indirect_parameters_index.into() + } + (false, None) => 0, }, }, ); @@ -1317,7 +1486,7 @@ pub fn batch_and_prepare_sorted_render_phase( batch.flush( data_buffer.len() as u32, phase, - &mut indirect_parameters_buffers, + &mut phase_indirect_parameters_buffers.buffers, ); } } @@ -1325,8 +1494,8 @@ pub fn batch_and_prepare_sorted_render_phase( /// Creates batches for a render phase that uses bins. pub fn batch_and_prepare_binned_render_phase( - gpu_array_buffer: ResMut>, - mut indirect_parameters_buffers: ResMut, + mut phase_batched_instance_buffers: ResMut>, + phase_indirect_parameters_buffers: ResMut>, mut binned_render_phases: ResMut>, mut views: Query< ( @@ -1343,13 +1512,14 @@ pub fn batch_and_prepare_binned_render_phase( { let system_param_item = param.into_inner(); - let BatchedInstanceBuffers { + let phase_indirect_parameters_buffers = phase_indirect_parameters_buffers.into_inner(); + + let UntypedPhaseBatchedInstanceBuffers { ref mut data_buffer, ref mut work_item_buffers, ref mut late_indexed_indirect_parameters_buffer, ref mut late_non_indexed_indirect_parameters_buffer, - .. 
- } = gpu_array_buffer.into_inner(); + } = phase_batched_instance_buffers.buffers; for (extracted_view, no_indirect_drawing, gpu_occlusion_culling) in &mut views { let Some(phase) = binned_render_phases.get_mut(&extracted_view.retained_view_entity) else { @@ -1374,118 +1544,76 @@ pub fn batch_and_prepare_binned_render_phase( // Prepare multidrawables. - for batch_set_key in &phase.multidrawable_mesh_keys { - let mut batch_set = None; - let indirect_parameters_base = - indirect_parameters_buffers.batch_count(batch_set_key.indexed()) as u32; - for (bin_key, bin) in &phase.multidrawable_mesh_values[batch_set_key] { - let first_output_index = data_buffer.len() as u32; - let mut batch: Option = None; + if let ( + &mut BinnedRenderPhaseBatchSets::MultidrawIndirect(ref mut batch_sets), + &mut PreprocessWorkItemBuffers::Indirect { + indexed: ref mut indexed_work_item_buffer, + non_indexed: ref mut non_indexed_work_item_buffer, + gpu_occlusion_culling: ref mut gpu_occlusion_culling_buffers, + }, + ) = (&mut phase.batch_sets, &mut *work_item_buffer) + { + let mut output_index = data_buffer.len() as u32; - for main_entity in bin.entities() { - let Some(input_index) = - GFBD::get_binned_index(&system_param_item, *main_entity) - else { - continue; - }; - let output_index = data_buffer.add() as u32; + // Initialize the state for both indexed and non-indexed meshes. + let mut indexed_preparer: MultidrawableBatchSetPreparer = + MultidrawableBatchSetPreparer::new( + phase_indirect_parameters_buffers.buffers.batch_count(true) as u32, + phase_indirect_parameters_buffers + .buffers + .indexed + .batch_sets + .len() as u32, + ); + let mut non_indexed_preparer: MultidrawableBatchSetPreparer = + MultidrawableBatchSetPreparer::new( + phase_indirect_parameters_buffers.buffers.batch_count(false) as u32, + phase_indirect_parameters_buffers + .buffers + .non_indexed + .batch_sets + .len() as u32, + ); - match batch { - Some(ref mut batch) => { - // Append to the current batch. - batch.instance_range.end = output_index + 1; - work_item_buffer.push( - batch_set_key.indexed(), - PreprocessWorkItem { - input_index: input_index.into(), - output_index: first_output_index, - indirect_parameters_index: match batch.extra_index { - PhaseItemExtraIndex::IndirectParametersIndex { - ref range, - .. - } => range.start, - PhaseItemExtraIndex::DynamicOffset(_) - | PhaseItemExtraIndex::None => 0, - }, - }, - ); - } - - None => { - // Start a new batch, in indirect mode. 
- let indirect_parameters_index = - indirect_parameters_buffers.allocate(batch_set_key.indexed(), 1); - let batch_set_index = indirect_parameters_buffers - .get_next_batch_set_index(batch_set_key.indexed()); - - GFBD::write_batch_indirect_parameters_metadata( - input_index.into(), - batch_set_key.indexed(), - output_index, - batch_set_index, - &mut indirect_parameters_buffers, - indirect_parameters_index, - ); - work_item_buffer.push( - batch_set_key.indexed(), - PreprocessWorkItem { - input_index: input_index.into(), - output_index: first_output_index, - indirect_parameters_index, - }, - ); - batch = Some(BinnedRenderPhaseBatch { - representative_entity: (Entity::PLACEHOLDER, *main_entity), - instance_range: output_index..output_index + 1, - extra_index: PhaseItemExtraIndex::maybe_indirect_parameters_index( - NonMaxU32::new(indirect_parameters_index), - ), - }); - } - } - } - - if let Some(batch) = batch { - match batch_set { - None => { - batch_set = Some(BinnedRenderPhaseBatchSet { - first_batch: batch, - batch_count: 1, - bin_key: bin_key.clone(), - index: indirect_parameters_buffers - .batch_set_count(batch_set_key.indexed()) - as u32, - }); - } - Some(ref mut batch_set) => { - batch_set.batch_count += 1; - } - } + // Prepare each batch set. + for (batch_set_key, bins) in &phase.multidrawable_meshes { + if batch_set_key.indexed() { + indexed_preparer.prepare_multidrawable_binned_batch_set( + bins, + &mut output_index, + data_buffer, + indexed_work_item_buffer, + &mut phase_indirect_parameters_buffers.buffers.indexed, + batch_sets, + ); + } else { + non_indexed_preparer.prepare_multidrawable_binned_batch_set( + bins, + &mut output_index, + data_buffer, + non_indexed_work_item_buffer, + &mut phase_indirect_parameters_buffers.buffers.non_indexed, + batch_sets, + ); } } - if let BinnedRenderPhaseBatchSets::MultidrawIndirect(ref mut batch_sets) = - phase.batch_sets - { - if let Some(batch_set) = batch_set { - batch_sets.push(batch_set); - indirect_parameters_buffers - .add_batch_set(batch_set_key.indexed(), indirect_parameters_base); - } + // Reserve space in the occlusion culling buffers, if necessary. + if let Some(gpu_occlusion_culling_buffers) = gpu_occlusion_culling_buffers { + gpu_occlusion_culling_buffers + .late_indexed + .add_multiple(indexed_preparer.work_item_count); + gpu_occlusion_culling_buffers + .late_non_indexed + .add_multiple(non_indexed_preparer.work_item_count); } } // Prepare batchables. - for key in &phase.batchable_mesh_keys { - let first_output_index = data_buffer.len() as u32; - + for (key, bin) in &phase.batchable_meshes { let mut batch: Option = None; - for main_entity in phase.batchable_mesh_values[key].entities() { - let Some(input_index) = GFBD::get_binned_index(&system_param_item, *main_entity) - else { - continue; - }; + for (&main_entity, &input_index) in bin.entities() { let output_index = data_buffer.add() as u32; match batch { @@ -1502,19 +1630,21 @@ pub fn batch_and_prepare_binned_render_phase( work_item_buffer.push( key.0.indexed(), PreprocessWorkItem { - input_index: input_index.into(), - output_index: if no_indirect_drawing { - output_index - } else { - first_output_index - }, - indirect_parameters_index: match batch.extra_index { - PhaseItemExtraIndex::IndirectParametersIndex { - range: ref indirect_parameters_range, - .. 
- } => indirect_parameters_range.start, - PhaseItemExtraIndex::DynamicOffset(_) - | PhaseItemExtraIndex::None => 0, + input_index: *input_index, + output_or_indirect_parameters_index: match ( + no_indirect_drawing, + &batch.extra_index, + ) { + (true, _) => output_index, + ( + false, + PhaseItemExtraIndex::IndirectParametersIndex { + range: indirect_parameters_range, + .. + }, + ) => indirect_parameters_range.start, + (false, &PhaseItemExtraIndex::DynamicOffset(_)) + | (false, &PhaseItemExtraIndex::None) => 0, }, }, ); @@ -1522,29 +1652,29 @@ pub fn batch_and_prepare_binned_render_phase( None if !no_indirect_drawing => { // Start a new batch, in indirect mode. - let indirect_parameters_index = - indirect_parameters_buffers.allocate(key.0.indexed(), 1); - let batch_set_index = - indirect_parameters_buffers.get_next_batch_set_index(key.0.indexed()); + let indirect_parameters_index = phase_indirect_parameters_buffers + .buffers + .allocate(key.0.indexed(), 1); + let batch_set_index = phase_indirect_parameters_buffers + .buffers + .get_next_batch_set_index(key.0.indexed()); GFBD::write_batch_indirect_parameters_metadata( - input_index.into(), key.0.indexed(), output_index, batch_set_index, - &mut indirect_parameters_buffers, + &mut phase_indirect_parameters_buffers.buffers, indirect_parameters_index, ); work_item_buffer.push( key.0.indexed(), PreprocessWorkItem { - input_index: input_index.into(), - output_index: first_output_index, - indirect_parameters_index, + input_index: *input_index, + output_or_indirect_parameters_index: indirect_parameters_index, }, ); batch = Some(BinnedRenderPhaseBatch { - representative_entity: (Entity::PLACEHOLDER, *main_entity), + representative_entity: (Entity::PLACEHOLDER, main_entity), instance_range: output_index..output_index + 1, extra_index: PhaseItemExtraIndex::IndirectParametersIndex { range: indirect_parameters_index..(indirect_parameters_index + 1), @@ -1558,13 +1688,12 @@ pub fn batch_and_prepare_binned_render_phase( work_item_buffer.push( key.0.indexed(), PreprocessWorkItem { - input_index: input_index.into(), - output_index, - indirect_parameters_index: 0, + input_index: *input_index, + output_or_indirect_parameters_index: output_index, }, ); batch = Some(BinnedRenderPhaseBatch { - representative_entity: (Entity::PLACEHOLDER, *main_entity), + representative_entity: (Entity::PLACEHOLDER, main_entity), instance_range: output_index..output_index + 1, extra_index: PhaseItemExtraIndex::None, }); @@ -1589,7 +1718,9 @@ pub fn batch_and_prepare_binned_render_phase( first_batch: batch, batch_count: 1, bin_key: key.1.clone(), - index: indirect_parameters_buffers.batch_set_count(key.0.indexed()) + index: phase_indirect_parameters_buffers + .buffers + .batch_set_count(key.0.indexed()) as u32, }); } @@ -1598,21 +1729,23 @@ pub fn batch_and_prepare_binned_render_phase( } // Prepare unbatchables. - for key in &phase.unbatchable_mesh_keys { - let unbatchables = phase.unbatchable_mesh_values.get_mut(key).unwrap(); - + for (key, unbatchables) in &mut phase.unbatchable_meshes { // Allocate the indirect parameters if necessary. 
let mut indirect_parameters_offset = if no_indirect_drawing { None } else if key.0.indexed() { Some( - indirect_parameters_buffers - .allocate_indexed(unbatchables.entities.len() as u32), + phase_indirect_parameters_buffers + .buffers + .indexed + .allocate(unbatchables.entities.len() as u32), ) } else { Some( - indirect_parameters_buffers - .allocate_non_indexed(unbatchables.entities.len() as u32), + phase_indirect_parameters_buffers + .buffers + .non_indexed + .allocate(unbatchables.entities.len() as u32), ) }; @@ -1627,19 +1760,17 @@ pub fn batch_and_prepare_binned_render_phase( // We're in indirect mode, so add an indirect parameters // index. GFBD::write_batch_indirect_parameters_metadata( - input_index.into(), key.0.indexed(), output_index, None, - &mut indirect_parameters_buffers, + &mut phase_indirect_parameters_buffers.buffers, *indirect_parameters_index, ); work_item_buffer.push( key.0.indexed(), PreprocessWorkItem { input_index: input_index.into(), - output_index, - indirect_parameters_index: *indirect_parameters_index, + output_or_indirect_parameters_index: *indirect_parameters_index, }, ); unbatchables @@ -1651,7 +1782,8 @@ pub fn batch_and_prepare_binned_render_phase( batch_set_index: None, }, }); - indirect_parameters_buffers + phase_indirect_parameters_buffers + .buffers .add_batch_set(key.0.indexed(), *indirect_parameters_index); *indirect_parameters_index += 1; } else { @@ -1659,8 +1791,7 @@ pub fn batch_and_prepare_binned_render_phase( key.0.indexed(), PreprocessWorkItem { input_index: input_index.into(), - output_index, - indirect_parameters_index: 0, + output_or_indirect_parameters_index: output_index, }, ); unbatchables @@ -1675,6 +1806,195 @@ pub fn batch_and_prepare_binned_render_phase( } } +/// The state that [`batch_and_prepare_binned_render_phase`] uses to construct +/// multidrawable batch sets. +/// +/// The [`batch_and_prepare_binned_render_phase`] system maintains two of these: +/// one for indexed meshes and one for non-indexed meshes. +struct MultidrawableBatchSetPreparer +where + BPI: BinnedPhaseItem, + GFBD: GetFullBatchData, +{ + /// The offset in the indirect parameters buffer at which the next indirect + /// parameters will be written. + indirect_parameters_index: u32, + /// The number of batch sets we've built so far for this mesh class. + batch_set_index: u32, + /// The number of work items we've emitted so far for this mesh class. + work_item_count: usize, + phantom: PhantomData<(BPI, GFBD)>, +} + +impl MultidrawableBatchSetPreparer +where + BPI: BinnedPhaseItem, + GFBD: GetFullBatchData, +{ + /// Creates a new [`MultidrawableBatchSetPreparer`] that will start writing + /// indirect parameters and batch sets at the given indices. + #[inline] + fn new(initial_indirect_parameters_index: u32, initial_batch_set_index: u32) -> Self { + MultidrawableBatchSetPreparer { + indirect_parameters_index: initial_indirect_parameters_index, + batch_set_index: initial_batch_set_index, + work_item_count: 0, + phantom: PhantomData, + } + } + + /// Creates batch sets and writes the GPU data needed to draw all visible + /// entities of one mesh class in the given batch set. + /// + /// The *mesh class* represents whether the mesh has indices or not. 
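// A worked example of the bookkeeping this type performs, assuming one indexed
// batch set whose three bins contain 5, 2, and 4 visible entities (numbers
// purely illustrative):
let bin_sizes = [5u32, 2, 4];
let cpu_metadata_entries = bin_sizes.len() as u32; // one per bin -> 3
let work_items: u32 = bin_sizes.iter().sum(); // 5 + 2 + 4 = 11 `PreprocessWorkItem`s
let batch_sets = 1u32; // one `IndirectBatchSet` per batch set
assert_eq!((cpu_metadata_entries, work_items, batch_sets), (3, 11, 1));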
+ #[inline] + fn prepare_multidrawable_binned_batch_set( + &mut self, + bins: &IndexMap, + output_index: &mut u32, + data_buffer: &mut UninitBufferVec, + indexed_work_item_buffer: &mut RawBufferVec, + mesh_class_buffers: &mut MeshClassIndirectParametersBuffers, + batch_sets: &mut Vec>, + ) where + IP: Clone + ShaderSize + WriteInto, + { + let current_indexed_batch_set_index = self.batch_set_index; + let current_output_index = *output_index; + + let indirect_parameters_base = self.indirect_parameters_index; + + // We're going to write the first entity into the batch set. Do this + // here so that we can preload the bin into cache as a side effect. + let Some((first_bin_key, first_bin)) = bins.iter().next() else { + return; + }; + let first_bin_len = first_bin.entities().len(); + let first_bin_entity = first_bin + .entities() + .keys() + .next() + .copied() + .unwrap_or(MainEntity::from(Entity::PLACEHOLDER)); + + // Traverse the batch set, processing each bin. + for bin in bins.values() { + // Record the first output index for this batch, as well as its own + // index. + mesh_class_buffers + .cpu_metadata + .push(IndirectParametersCpuMetadata { + base_output_index: *output_index, + batch_set_index: self.batch_set_index, + }); + + // Traverse the bin, pushing `PreprocessWorkItem`s for each entity + // within it. This is a hot loop, so make it as fast as possible. + for &input_index in bin.entities().values() { + indexed_work_item_buffer.push(PreprocessWorkItem { + input_index: *input_index, + output_or_indirect_parameters_index: self.indirect_parameters_index, + }); + } + + // Reserve space for the appropriate number of entities in the data + // buffer. Also, advance the output index and work item count. + let bin_entity_count = bin.entities().len(); + data_buffer.add_multiple(bin_entity_count); + *output_index += bin_entity_count as u32; + self.work_item_count += bin_entity_count; + + self.indirect_parameters_index += 1; + } + + // Reserve space for the bins in this batch set in the GPU buffers. + let bin_count = bins.len(); + mesh_class_buffers.gpu_metadata.add_multiple(bin_count); + mesh_class_buffers.data.add_multiple(bin_count); + + // Write the information the GPU will need about this batch set. + mesh_class_buffers.batch_sets.push(IndirectBatchSet { + indirect_parameters_base, + indirect_parameters_count: 0, + }); + + self.batch_set_index += 1; + + // Record the batch set. The render node later processes this record to + // render the batches. + batch_sets.push(BinnedRenderPhaseBatchSet { + first_batch: BinnedRenderPhaseBatch { + representative_entity: (Entity::PLACEHOLDER, first_bin_entity), + instance_range: current_output_index..(current_output_index + first_bin_len as u32), + extra_index: PhaseItemExtraIndex::maybe_indirect_parameters_index(NonMaxU32::new( + indirect_parameters_base, + )), + }, + bin_key: (*first_bin_key).clone(), + batch_count: self.indirect_parameters_index - indirect_parameters_base, + index: current_indexed_batch_set_index, + }); + } +} + +/// A system that gathers up the per-phase GPU buffers and inserts them into the +/// [`BatchedInstanceBuffers`] and [`IndirectParametersBuffers`] tables. +/// +/// This runs after the [`batch_and_prepare_binned_render_phase`] or +/// [`batch_and_prepare_sorted_render_phase`] systems. It takes the per-phase +/// [`PhaseBatchedInstanceBuffers`] and [`PhaseIndirectParametersBuffers`] +/// resources and inserts them into the global [`BatchedInstanceBuffers`] and +/// [`IndirectParametersBuffers`] tables. 
+///
+/// This system exists so that the [`batch_and_prepare_binned_render_phase`] and
+/// [`batch_and_prepare_sorted_render_phase`] systems can run in parallel with
+/// one another. If those two systems manipulated [`BatchedInstanceBuffers`] and
+/// [`IndirectParametersBuffers`] directly, then they wouldn't be able to run in
+/// parallel.
+pub fn collect_buffers_for_phase<PI, GFBD>(
+    mut phase_batched_instance_buffers: ResMut<PhaseBatchedInstanceBuffers<PI, GFBD::BufferData>>,
+    mut phase_indirect_parameters_buffers: ResMut<PhaseIndirectParametersBuffers<PI>>,
+    mut batched_instance_buffers: ResMut<
+        BatchedInstanceBuffers<GFBD::BufferData, GFBD::BufferInputData>,
+    >,
+    mut indirect_parameters_buffers: ResMut<IndirectParametersBuffers>,
+) where
+    PI: PhaseItem,
+    GFBD: GetFullBatchData + Send + Sync + 'static,
+{
+    // Insert the `PhaseBatchedInstanceBuffers` into the global table. Replace
+    // the contents of the per-phase resource with the old batched instance
+    // buffers in order to reuse allocations.
+    let untyped_phase_batched_instance_buffers =
+        mem::take(&mut phase_batched_instance_buffers.buffers);
+    if let Some(mut old_untyped_phase_batched_instance_buffers) = batched_instance_buffers
+        .phase_instance_buffers
+        .insert(TypeId::of::<PI>(), untyped_phase_batched_instance_buffers)
+    {
+        old_untyped_phase_batched_instance_buffers.clear();
+        phase_batched_instance_buffers.buffers = old_untyped_phase_batched_instance_buffers;
+    }
+
+    // Insert the `PhaseIndirectParametersBuffers` into the global table.
+    // Replace the contents of the per-phase resource with the old indirect
+    // parameters buffers in order to reuse allocations.
+    let untyped_phase_indirect_parameters_buffers = mem::replace(
+        &mut phase_indirect_parameters_buffers.buffers,
+        UntypedPhaseIndirectParametersBuffers::new(
+            indirect_parameters_buffers.allow_copies_from_indirect_parameter_buffers,
+        ),
+    );
+    if let Some(mut old_untyped_phase_indirect_parameters_buffers) = indirect_parameters_buffers
+        .insert(
+            TypeId::of::<PI>(),
+            untyped_phase_indirect_parameters_buffers,
+        )
+    {
+        old_untyped_phase_indirect_parameters_buffers.clear();
+        phase_indirect_parameters_buffers.buffers = old_untyped_phase_indirect_parameters_buffers;
+    }
+}
+
 /// A system that writes all instance buffers to the GPU.
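// A rough sketch of how the per-phase systems could be ordered for one phase:
// batching for different phases may run in parallel, and the collection step
// for a phase runs after its batching. `Opaque3d` and `MeshPipeline` are only
// illustrative stand-ins for a binned phase item and a `GetFullBatchData`
// implementation; the actual registration lives elsewhere in the engine.
render_app.add_systems(
    Render,
    (
        batch_and_prepare_binned_render_phase::<Opaque3d, MeshPipeline>,
        collect_buffers_for_phase::<Opaque3d, MeshPipeline>,
    )
        .chain()
        .in_set(RenderSet::PrepareResources),
);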
pub fn write_batched_instance_buffers( render_device: Res, @@ -1684,26 +2004,31 @@ pub fn write_batched_instance_buffers( GFBD: GetFullBatchData, { let BatchedInstanceBuffers { - ref mut data_buffer, - ref mut work_item_buffers, - ref mut current_input_buffer, - ref mut previous_input_buffer, - ref mut late_indexed_indirect_parameters_buffer, - ref mut late_non_indexed_indirect_parameters_buffer, + current_input_buffer, + previous_input_buffer, + phase_instance_buffers, } = gpu_array_buffer.into_inner(); - data_buffer.write_buffer(&render_device); current_input_buffer .buffer .write_buffer(&render_device, &render_queue); previous_input_buffer .buffer .write_buffer(&render_device, &render_queue); - late_indexed_indirect_parameters_buffer.write_buffer(&render_device, &render_queue); - late_non_indexed_indirect_parameters_buffer.write_buffer(&render_device, &render_queue); - for view_work_item_buffers in work_item_buffers.values_mut() { - for phase_work_item_buffers in view_work_item_buffers.values_mut() { + for phase_instance_buffers in phase_instance_buffers.values_mut() { + let UntypedPhaseBatchedInstanceBuffers { + ref mut data_buffer, + ref mut work_item_buffers, + ref mut late_indexed_indirect_parameters_buffer, + ref mut late_non_indexed_indirect_parameters_buffer, + } = *phase_instance_buffers; + + data_buffer.write_buffer(&render_device); + late_indexed_indirect_parameters_buffer.write_buffer(&render_device, &render_queue); + late_non_indexed_indirect_parameters_buffer.write_buffer(&render_device, &render_queue); + + for phase_work_item_buffers in work_item_buffers.values_mut() { match *phase_work_item_buffers { PreprocessWorkItemBuffers::Direct(ref mut buffer_vec) => { buffer_vec.write_buffer(&render_device, &render_queue); @@ -1739,12 +2064,9 @@ pub fn write_batched_instance_buffers( pub fn clear_indirect_parameters_buffers( mut indirect_parameters_buffers: ResMut, ) { - indirect_parameters_buffers.indexed_data.clear(); - indirect_parameters_buffers.indexed_metadata.clear(); - indirect_parameters_buffers.indexed_batch_sets.clear(); - indirect_parameters_buffers.non_indexed_data.clear(); - indirect_parameters_buffers.non_indexed_metadata.clear(); - indirect_parameters_buffers.non_indexed_batch_sets.clear(); + for phase_indirect_parameters_buffers in indirect_parameters_buffers.values_mut() { + phase_indirect_parameters_buffers.clear(); + } } pub fn write_indirect_parameters_buffers( @@ -1752,26 +2074,43 @@ pub fn write_indirect_parameters_buffers( render_queue: Res, mut indirect_parameters_buffers: ResMut, ) { - indirect_parameters_buffers - .indexed_data - .write_buffer(&render_device); - indirect_parameters_buffers - .non_indexed_data - .write_buffer(&render_device); + for phase_indirect_parameters_buffers in indirect_parameters_buffers.values_mut() { + phase_indirect_parameters_buffers + .indexed + .data + .write_buffer(&render_device); + phase_indirect_parameters_buffers + .non_indexed + .data + .write_buffer(&render_device); - indirect_parameters_buffers - .indexed_metadata - .write_buffer(&render_device, &render_queue); - indirect_parameters_buffers - .non_indexed_metadata - .write_buffer(&render_device, &render_queue); + phase_indirect_parameters_buffers + .indexed + .cpu_metadata + .write_buffer(&render_device, &render_queue); + phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata + .write_buffer(&render_device, &render_queue); - indirect_parameters_buffers - .indexed_batch_sets - .write_buffer(&render_device, &render_queue); - indirect_parameters_buffers - 
.non_indexed_batch_sets - .write_buffer(&render_device, &render_queue); + phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata + .write_buffer(&render_device); + phase_indirect_parameters_buffers + .indexed + .gpu_metadata + .write_buffer(&render_device); + + phase_indirect_parameters_buffers + .indexed + .batch_sets + .write_buffer(&render_device, &render_queue); + phase_indirect_parameters_buffers + .non_indexed + .batch_sets + .write_buffer(&render_device, &render_queue); + } } #[cfg(test)] diff --git a/crates/bevy_render/src/batching/mod.rs b/crates/bevy_render/src/batching/mod.rs index 9569f2ce8c..40ce7ce3b4 100644 --- a/crates/bevy_render/src/batching/mod.rs +++ b/crates/bevy_render/src/batching/mod.rs @@ -4,16 +4,16 @@ use bevy_ecs::{ system::{ResMut, SystemParam, SystemParamItem}, }; use bytemuck::Pod; +use gpu_preprocessing::UntypedPhaseIndirectParametersBuffers; use nonmax::NonMaxU32; -use self::gpu_preprocessing::IndirectParametersBuffers; -use crate::{render_phase::PhaseItemExtraIndex, sync_world::MainEntity}; use crate::{ render_phase::{ - BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId, SortedPhaseItem, - SortedRenderPhase, ViewBinnedRenderPhases, + BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId, PhaseItemExtraIndex, + SortedPhaseItem, SortedRenderPhase, ViewBinnedRenderPhases, }, render_resource::{CachedRenderPipelineId, GpuArrayBufferable}, + sync_world::MainEntity, }; pub mod gpu_preprocessing; @@ -132,26 +132,29 @@ pub trait GetFullBatchData: GetBatchData { ) -> Option<(NonMaxU32, Option)>; /// Returns the index of the [`GetFullBatchData::BufferInputData`] that the - /// GPU preprocessing phase will use, for the binning path. + /// GPU preprocessing phase will use. /// /// We already inserted the [`GetFullBatchData::BufferInputData`] during the /// extraction phase before we got here, so this function shouldn't need to - /// look up any render data. If CPU instance buffer building is in use, this - /// function will never be called. + /// look up any render data. + /// + /// This function is currently only called for unbatchable entities when GPU + /// instance buffer building is in use. For batchable entities, the uniform + /// index is written during queuing (e.g. in `queue_material_meshes`). In + /// the case of CPU instance buffer building, the CPU writes the uniforms, + /// so there's no index to return. fn get_binned_index( param: &SystemParamItem, query_item: MainEntity, ) -> Option; - /// Writes the [`gpu_preprocessing::IndirectParametersMetadata`] necessary - /// to draw this batch into the given metadata buffer at the given index. + /// Writes the [`gpu_preprocessing::IndirectParametersGpuMetadata`] + /// necessary to draw this batch into the given metadata buffer at the given + /// index. /// /// This is only used if GPU culling is enabled (which requires GPU /// preprocessing). /// - /// * `mesh_index` describes the index of the first mesh instance in this - /// batch in the `MeshInputUniform` buffer. - /// /// * `indexed` is true if the mesh is indexed or false if it's non-indexed. /// /// * `base_output_index` is the index of the first mesh instance in this @@ -167,11 +170,10 @@ pub trait GetFullBatchData: GetBatchData { /// * `indirect_parameters_offset` is the index in that buffer at which to /// write the metadata. 
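// A sketch of how an implementor might satisfy this trait method using the
// types introduced in `gpu_preprocessing`; the body below is illustrative
// rather than copied from any particular `GetFullBatchData` implementation.
fn write_batch_indirect_parameters_metadata(
    indexed: bool,
    base_output_index: u32,
    batch_set_index: Option<NonMaxU32>,
    indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers,
    indirect_parameters_offset: u32,
) {
    let metadata = IndirectParametersCpuMetadata {
        base_output_index,
        // Assumption: an all-ones sentinel marks "no batch set".
        batch_set_index: batch_set_index.map_or(!0, |index| index.get()),
    };
    if indexed {
        indirect_parameters_buffers
            .indexed
            .set(indirect_parameters_offset, metadata);
    } else {
        indirect_parameters_buffers
            .non_indexed
            .set(indirect_parameters_offset, metadata);
    }
}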
fn write_batch_indirect_parameters_metadata( - mesh_index: u32, indexed: bool, base_output_index: u32, batch_set_index: Option, - indirect_parameters_buffers: &mut IndirectParametersBuffers, + indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers, indirect_parameters_offset: u32, ); } @@ -182,23 +184,10 @@ where BPI: BinnedPhaseItem, { for phase in phases.values_mut() { - phase.multidrawable_mesh_keys.clear(); - phase - .multidrawable_mesh_keys - .extend(phase.multidrawable_mesh_values.keys().cloned()); - phase.multidrawable_mesh_keys.sort_unstable(); - - phase.batchable_mesh_keys.clear(); - phase - .batchable_mesh_keys - .extend(phase.batchable_mesh_values.keys().cloned()); - phase.batchable_mesh_keys.sort_unstable(); - - phase.unbatchable_mesh_keys.clear(); - phase - .unbatchable_mesh_keys - .extend(phase.unbatchable_mesh_values.keys().cloned()); - phase.unbatchable_mesh_keys.sort_unstable(); + phase.multidrawable_meshes.sort_unstable_keys(); + phase.batchable_meshes.sort_unstable_keys(); + phase.unbatchable_meshes.sort_unstable_keys(); + phase.non_mesh_items.sort_unstable_keys(); } } diff --git a/crates/bevy_render/src/batching/no_gpu_preprocessing.rs b/crates/bevy_render/src/batching/no_gpu_preprocessing.rs index 7206bb4539..8bbbff8dd9 100644 --- a/crates/bevy_render/src/batching/no_gpu_preprocessing.rs +++ b/crates/bevy_render/src/batching/no_gpu_preprocessing.rs @@ -108,9 +108,9 @@ pub fn batch_and_prepare_binned_render_phase( for phase in phases.values_mut() { // Prepare batchables. - for key in &phase.batchable_mesh_keys { + for bin in phase.batchable_meshes.values_mut() { let mut batch_set: SmallVec<[BinnedRenderPhaseBatch; 1]> = smallvec![]; - for main_entity in phase.batchable_mesh_values[key].entities() { + for main_entity in bin.entities().keys() { let Some(buffer_data) = GFBD::get_binned_batch_data(&system_param_item, *main_entity) else { @@ -156,8 +156,7 @@ pub fn batch_and_prepare_binned_render_phase( } // Prepare unbatchables. - for key in &phase.unbatchable_mesh_keys { - let unbatchables = phase.unbatchable_mesh_values.get_mut(key).unwrap(); + for unbatchables in phase.unbatchable_meshes.values_mut() { for main_entity in unbatchables.entities.keys() { let Some(buffer_data) = GFBD::get_binned_batch_data(&system_param_item, *main_entity) diff --git a/crates/bevy_render/src/bindless.wgsl b/crates/bevy_render/src/bindless.wgsl new file mode 100644 index 0000000000..05517a1746 --- /dev/null +++ b/crates/bevy_render/src/bindless.wgsl @@ -0,0 +1,37 @@ +// Defines the common arrays used to access bindless resources. +// +// This need to be kept up to date with the `BINDING_NUMBERS` table in +// `bindless.rs`. +// +// You access these by indexing into the bindless index table, and from there +// indexing into the appropriate binding array. For example, to access the base +// color texture of a `StandardMaterial` in bindless mode, write +// `bindless_textures_2d[materials[slot].base_color_texture]`, where +// `materials` is the bindless index table and `slot` is the index into that +// table (which can be found in the `Mesh`). + +#define_import_path bevy_render::bindless + +#ifdef BINDLESS + +// Binding 0 is the bindless index table. +// Filtering samplers. +@group(2) @binding(1) var bindless_samplers_filtering: binding_array; +// Non-filtering samplers (nearest neighbor). +@group(2) @binding(2) var bindless_samplers_non_filtering: binding_array; +// Comparison samplers (typically for shadow mapping). 
+@group(2) @binding(3) var bindless_samplers_comparison: binding_array<sampler_comparison>;
+// 1D textures.
+@group(2) @binding(4) var bindless_textures_1d: binding_array<texture_1d<f32>>;
+// 2D textures.
+@group(2) @binding(5) var bindless_textures_2d: binding_array<texture_2d<f32>>;
+// 2D array textures.
+@group(2) @binding(6) var bindless_textures_2d_array: binding_array<texture_2d_array<f32>>;
+// 3D textures.
+@group(2) @binding(7) var bindless_textures_3d: binding_array<texture_3d<f32>>;
+// Cubemap textures.
+@group(2) @binding(8) var bindless_textures_cube: binding_array<texture_cube<f32>>;
+// Cubemap array textures.
+@group(2) @binding(9) var bindless_textures_cube_array: binding_array<texture_cube_array<f32>>;
+
+#endif // BINDLESS
diff --git a/crates/bevy_render/src/camera/camera.rs b/crates/bevy_render/src/camera/camera.rs
index 298be27c5f..95218b7a59 100644
--- a/crates/bevy_render/src/camera/camera.rs
+++ b/crates/bevy_render/src/camera/camera.rs
@@ -23,9 +23,9 @@ use bevy_derive::{Deref, DerefMut};
 use bevy_ecs::{
     change_detection::DetectChanges,
     component::{Component, HookContext},
-    entity::{Entity, EntityBorrow},
+    entity::{ContainsEntity, Entity},
     event::EventReader,
-    prelude::{require, With},
+    prelude::With,
     query::Has,
     reflect::ReflectComponent,
     resource::Resource,
@@ -34,7 +34,7 @@ use bevy_ecs::{
 };
 use bevy_image::Image;
 use bevy_math::{ops, vec2, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, UVec4, Vec2, Vec3};
-use bevy_platform_support::collections::{HashMap, HashSet};
+use bevy_platform::collections::{HashMap, HashSet};
 use bevy_reflect::prelude::*;
 use bevy_render_macros::ExtractComponent;
 use bevy_transform::components::{GlobalTransform, Transform};
@@ -44,6 +44,7 @@ use bevy_window::{
 };
 use core::ops::Range;
 use derive_more::derive::From;
+use thiserror::Error;
 use tracing::warn;
 use wgpu::{BlendState, TextureFormat, TextureUsages};
@@ -53,7 +54,7 @@ use wgpu::{BlendState, TextureFormat, TextureUsages};
 /// You can overlay multiple cameras in a single window using viewports to create effects like
 /// split screen, minimaps, and character viewers.
 #[derive(Reflect, Debug, Clone)]
-#[reflect(Default)]
+#[reflect(Default, Clone)]
 pub struct Viewport {
     /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
     /// (0,0) corresponds to the top-left corner
@@ -75,6 +76,42 @@ impl Default for Viewport {
     }
 }
 
+impl Viewport {
+    /// Cut the viewport rectangle so that it lies inside a rectangle of the
+    /// given size.
+    ///
+    /// If either of the viewport's position coordinates lies outside the given
+    /// dimensions, it will be moved just inside first. If either of the given
+    /// dimensions is zero, the position and size of the viewport rectangle will
+    /// both be set to zero in that dimension.
+    pub fn clamp_to_size(&mut self, size: UVec2) {
+        // If the origin of the viewport rect is outside, then adjust so that
+        // it's just barely inside. Then, cut off the part that is outside.
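        // A minimal usage sketch of the clamping behavior, assuming a 1280x720
        // render target (the values below are illustrative, not part of the patch):
        //
        //     let mut viewport = Viewport {
        //         physical_position: UVec2::new(1200, 0),
        //         physical_size: UVec2::new(200, 100),
        //         ..Default::default()
        //     };
        //     viewport.clamp_to_size(UVec2::new(1280, 720));
        //     // The origin is inside the target, so only the width is cut:
        //     // physical_position == (1200, 0), physical_size == (80, 100).
        //     // Had the origin been at (1300, 0), it would be moved to (1279, 0)
        //     // and the width reduced to 1.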
+ if self.physical_size.x + self.physical_position.x > size.x { + if self.physical_position.x < size.x { + self.physical_size.x = size.x - self.physical_position.x; + } else if size.x > 0 { + self.physical_position.x = size.x - 1; + self.physical_size.x = 1; + } else { + self.physical_position.x = 0; + self.physical_size.x = 0; + } + } + if self.physical_size.y + self.physical_position.y > size.y { + if self.physical_position.y < size.y { + self.physical_size.y = size.y - self.physical_position.y; + } else if size.y > 0 { + self.physical_position.y = size.y - 1; + self.physical_size.y = 1; + } else { + self.physical_position.y = 0; + self.physical_size.y = 0; + } + } + } +} + /// Settings to define a camera sub view. /// /// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the @@ -104,6 +141,7 @@ impl Default for Viewport { /// example have the following values: /// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9 #[derive(Debug, Clone, Copy, Reflect, PartialEq)] +#[reflect(Clone, PartialEq, Default)] pub struct SubCameraView { /// Size of the entire camera view pub full_size: UVec2, @@ -150,7 +188,7 @@ pub struct ComputedCameraValues { /// #[derive(Component, Clone, Copy, Reflect)] #[reflect(opaque)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct Exposure { /// pub ev100: f32, @@ -249,7 +287,7 @@ impl Default for PhysicalCameraParameters { /// Error returned when a conversion between world-space and viewport-space coordinates fails. /// /// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world]. -#[derive(Debug, Eq, PartialEq, Copy, Clone)] +#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)] pub enum ViewportConversionError { /// The pre-computed size of the viewport was not available. /// @@ -259,18 +297,22 @@ pub enum ViewportConversionError { /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component, /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle), /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle). + #[error("pre-computed size of viewport not available")] NoViewportSize, /// The computed coordinate was beyond the `Camera`'s near plane. /// /// Only applicable when converting from world-space to viewport-space. + #[error("computed coordinate beyond `Camera`'s near plane")] PastNearPlane, /// The computed coordinate was beyond the `Camera`'s far plane. /// /// Only applicable when converting from world-space to viewport-space. + #[error("computed coordinate beyond `Camera`'s far plane")] PastFarPlane, /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the /// `world_position`, or the projection matrix defined by [`CameraProjection`] contained `NAN` /// (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]). 
+ #[error("found NaN while computing NDC")] InvalidData, } @@ -290,7 +332,7 @@ pub enum ViewportConversionError { /// [`Camera2d`]: https://docs.rs/bevy/latest/bevy/core_pipeline/core_2d/struct.Camera2d.html /// [`Camera3d`]: https://docs.rs/bevy/latest/bevy/core_pipeline/core_3d/struct.Camera3d.html #[derive(Component, Debug, Reflect, Clone)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[component(on_add = warn_on_no_render_graph)] #[require( Frustum, @@ -310,7 +352,7 @@ pub struct Camera { /// camera will not be rendered. pub is_active: bool, /// Computed values for this camera, such as the projection matrix and the render target size. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub computed: ComputedCameraValues, /// The "target" that this camera will render to. pub target: RenderTarget, @@ -319,7 +361,7 @@ pub struct Camera { pub hdr: bool, // todo: reflect this when #6042 lands /// The [`CameraOutputMode`] for this camera. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub output_mode: CameraOutputMode, /// If this is enabled, a previous camera exists that shares this camera's render target, and this camera has MSAA enabled, then the previous camera's /// outputs will be written to the intermediate multi-sampled render target textures for this camera. This enables cameras with MSAA enabled to @@ -472,10 +514,10 @@ impl Camera { camera_transform: &GlobalTransform, world_position: Vec3, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; - let ndc_space_coords = self + let mut ndc_space_coords = self .world_to_ndc(camera_transform, world_position) .ok_or(ViewportConversionError::InvalidData)?; // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space @@ -486,10 +528,12 @@ impl Camera { return Err(ViewportConversionError::PastFarPlane); } - // Once in NDC space, we can discard the z element and rescale x/y to fit the screen - let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size; // Flip the Y co-ordinate origin from the bottom to the top. - viewport_position.y = target_size.y - viewport_position.y; + ndc_space_coords.y = -ndc_space_coords.y; + + // Once in NDC space, we can discard the z element and map x/y to the viewport rect + let viewport_position = + (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min; Ok(viewport_position) } @@ -508,10 +552,10 @@ impl Camera { camera_transform: &GlobalTransform, world_position: Vec3, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; - let ndc_space_coords = self + let mut ndc_space_coords = self .world_to_ndc(camera_transform, world_position) .ok_or(ViewportConversionError::InvalidData)?; // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space @@ -525,10 +569,12 @@ impl Camera { // Stretching ndc depth to value via near plane and negating result to be in positive room again. let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z); - // Once in NDC space, we can discard the z element and rescale x/y to fit the screen - let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size; // Flip the Y co-ordinate origin from the bottom to the top. 
- viewport_position.y = target_size.y - viewport_position.y; + ndc_space_coords.y = -ndc_space_coords.y; + + // Once in NDC space, we can discard the z element and map x/y to the viewport rect + let viewport_position = + (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min; Ok(viewport_position.extend(depth)) } @@ -548,15 +594,16 @@ impl Camera { pub fn viewport_to_world( &self, camera_transform: &GlobalTransform, - mut viewport_position: Vec2, + viewport_position: Vec2, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; + let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size(); // Flip the Y co-ordinate origin from the top to the bottom. - viewport_position.y = target_size.y - viewport_position.y; - let ndc = viewport_position * 2. / target_size - Vec2::ONE; + rect_relative.y = 1.0 - rect_relative.y; + let ndc = rect_relative * 2. - Vec2::ONE; let ndc_to_world = camera_transform.compute_matrix() * self.computed.clip_from_view.inverse(); let world_near_plane = ndc_to_world.project_point3(ndc.extend(1.)); @@ -586,14 +633,17 @@ impl Camera { pub fn viewport_to_world_2d( &self, camera_transform: &GlobalTransform, - mut viewport_position: Vec2, + viewport_position: Vec2, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; + let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size(); + // Flip the Y co-ordinate origin from the top to the bottom. - viewport_position.y = target_size.y - viewport_position.y; - let ndc = viewport_position * 2. / target_size - Vec2::ONE; + rect_relative.y = 1.0 - rect_relative.y; + + let ndc = rect_relative * 2. - Vec2::ONE; let world_near_plane = self .ndc_to_world(camera_transform, ndc.extend(1.)) @@ -700,7 +750,7 @@ impl Default for CameraOutputMode { /// Configures the [`RenderGraph`](crate::render_graph::RenderGraph) name assigned to be run for a given [`Camera`] entity. #[derive(Component, Debug, Deref, DerefMut, Reflect, Clone)] #[reflect(opaque)] -#[reflect(Component, Debug)] +#[reflect(Component, Debug, Clone)] pub struct CameraRenderGraph(InternedRenderSubGraph); impl CameraRenderGraph { @@ -720,6 +770,7 @@ impl CameraRenderGraph { /// The "target" that a [`Camera`] will render to. For example, this could be a [`Window`] /// swapchain or an [`Image`]. #[derive(Debug, Clone, Reflect, From)] +#[reflect(Clone)] pub enum RenderTarget { /// Window to which the camera's view is rendered. Window(WindowRef), @@ -732,6 +783,7 @@ pub enum RenderTarget { /// A render target that renders to an [`Image`]. #[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[reflect(Clone, PartialEq, Hash)] pub struct ImageRenderTarget { /// The image to render to. pub handle: Handle, @@ -765,6 +817,7 @@ impl Default for RenderTarget { /// /// Once we have this we shouldn't need to resolve it down anymore. #[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)] +#[reflect(Clone, PartialEq, Hash)] pub enum NormalizedRenderTarget { /// Window to which the camera's view is rendered. 
Window(NormalizedWindowRef), @@ -972,18 +1025,13 @@ pub fn camera_system( } } } - // This check is needed because when changing WindowMode to SizedFullscreen, the viewport may have invalid + // This check is needed because when changing WindowMode to Fullscreen, the viewport may have invalid // arguments due to a sudden change on the window size to a lower value. // If the size of the window is lower, the viewport will match that lower value. if let Some(viewport) = &mut camera.viewport { let target_info = &new_computed_target_info; if let Some(target) = target_info { - if viewport.physical_size.x > target.physical_size.x { - viewport.physical_size.x = target.physical_size.x; - } - if viewport.physical_size.y > target.physical_size.y { - viewport.physical_size.y = target.physical_size.y; - } + viewport.clamp_to_size(target.physical_size); } } camera.computed.target_info = new_computed_target_info; @@ -1014,7 +1062,7 @@ pub fn camera_system( /// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera #[derive(Component, ExtractComponent, Clone, Copy, Reflect)] #[reflect(opaque)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct CameraMainTextureUsages(pub TextureUsages); impl Default for CameraMainTextureUsages { fn default() -> Self { @@ -1265,7 +1313,7 @@ pub fn sort_cameras( /// /// [`OrthographicProjection`]: crate::camera::OrthographicProjection #[derive(Component, Clone, Default, Reflect)] -#[reflect(Default, Component)] +#[reflect(Default, Component, Clone)] pub struct TemporalJitter { /// Offset is in range [-0.5, 0.5]. pub offset: Vec2, diff --git a/crates/bevy_render/src/camera/camera_driver_node.rs b/crates/bevy_render/src/camera/camera_driver_node.rs index 7fa221235f..8be5a345b4 100644 --- a/crates/bevy_render/src/camera/camera_driver_node.rs +++ b/crates/bevy_render/src/camera/camera_driver_node.rs @@ -4,8 +4,8 @@ use crate::{ renderer::RenderContext, view::ExtractedWindows, }; -use bevy_ecs::{entity::EntityBorrow, prelude::QueryState, world::World}; -use bevy_platform_support::collections::HashSet; +use bevy_ecs::{entity::ContainsEntity, prelude::QueryState, world::World}; +use bevy_platform::collections::HashSet; use wgpu::{LoadOp, Operations, RenderPassColorAttachment, RenderPassDescriptor, StoreOp}; pub struct CameraDriverNode { diff --git a/crates/bevy_render/src/camera/clear_color.rs b/crates/bevy_render/src/camera/clear_color.rs index 49dbdea76b..157bcf8998 100644 --- a/crates/bevy_render/src/camera/clear_color.rs +++ b/crates/bevy_render/src/camera/clear_color.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; /// For a camera, specifies the color used to clear the viewport before rendering. #[derive(Reflect, Serialize, Deserialize, Copy, Clone, Debug, Default, From)] -#[reflect(Serialize, Deserialize, Default)] +#[reflect(Serialize, Deserialize, Default, Clone)] pub enum ClearColorConfig { /// The clear color is taken from the world's [`ClearColor`] resource. #[default] @@ -26,7 +26,7 @@ pub enum ClearColorConfig { /// This color appears as the "background" color for simple apps, /// when there are portions of the screen with nothing rendered. #[derive(Resource, Clone, Debug, Deref, DerefMut, ExtractResource, Reflect)] -#[reflect(Resource, Default, Debug)] +#[reflect(Resource, Default, Debug, Clone)] pub struct ClearColor(pub Color); /// Match the dark gray bevy website code block color by default. 
diff --git a/crates/bevy_render/src/camera/manual_texture_view.rs b/crates/bevy_render/src/camera/manual_texture_view.rs index a9c302eaf4..56eff5612a 100644 --- a/crates/bevy_render/src/camera/manual_texture_view.rs +++ b/crates/bevy_render/src/camera/manual_texture_view.rs @@ -2,13 +2,13 @@ use crate::{extract_resource::ExtractResource, render_resource::TextureView}; use bevy_ecs::{prelude::Component, reflect::ReflectComponent, resource::Resource}; use bevy_image::BevyDefault as _; use bevy_math::UVec2; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::prelude::*; use wgpu::TextureFormat; /// A unique id that corresponds to a specific [`ManualTextureView`] in the [`ManualTextureViews`] collection. #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)] -#[reflect(Component, Default, Debug, PartialEq, Hash)] +#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)] pub struct ManualTextureViewHandle(pub u32); /// A manually managed [`TextureView`] for use as a [`crate::camera::RenderTarget`]. diff --git a/crates/bevy_render/src/camera/mod.rs b/crates/bevy_render/src/camera/mod.rs index 9b3c6907f6..4c77021bad 100644 --- a/crates/bevy_render/src/camera/mod.rs +++ b/crates/bevy_render/src/camera/mod.rs @@ -15,7 +15,7 @@ use crate::{ render_graph::RenderGraph, ExtractSchedule, Render, RenderApp, RenderSet, }; use bevy_app::{App, Plugin}; -use bevy_ecs::schedule::IntoSystemConfigs; +use bevy_ecs::schedule::IntoScheduleConfigs; #[derive(Default)] pub struct CameraPlugin; diff --git a/crates/bevy_render/src/camera/projection.rs b/crates/bevy_render/src/camera/projection.rs index 2b75dab047..e3f95cb036 100644 --- a/crates/bevy_render/src/camera/projection.rs +++ b/crates/bevy_render/src/camera/projection.rs @@ -129,7 +129,7 @@ mod sealed { /// /// The contained dynamic object can be downcast into a static type using [`CustomProjection::get`]. #[derive(Component, Debug, Reflect, Deref, DerefMut)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct CustomProjection { #[reflect(ignore)] #[deref] @@ -221,7 +221,7 @@ impl CustomProjection { /// /// [`Camera`]: crate::camera::Camera #[derive(Component, Debug, Clone, Reflect, From)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub enum Projection { Perspective(PerspectiveProjection), Orthographic(OrthographicProjection), @@ -298,7 +298,7 @@ impl Default for Projection { /// A 3D camera projection in which distant objects appear smaller than close objects. #[derive(Debug, Clone, Reflect)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct PerspectiveProjection { /// The vertical field of view (FOV) in radians. /// @@ -433,7 +433,7 @@ impl Default for PerspectiveProjection { /// }); /// ``` #[derive(Default, Debug, Clone, Copy, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Default, Clone)] pub enum ScalingMode { /// Match the viewport size. /// @@ -490,7 +490,7 @@ pub enum ScalingMode { /// }); /// ``` #[derive(Debug, Clone, Reflect)] -#[reflect(Debug, FromWorld)] +#[reflect(Debug, FromWorld, Clone)] pub struct OrthographicProjection { /// The distance of the near clipping plane in world units. 
/// diff --git a/crates/bevy_render/src/diagnostic/internal.rs b/crates/bevy_render/src/diagnostic/internal.rs index 8445ebe54d..ec226c760b 100644 --- a/crates/bevy_render/src/diagnostic/internal.rs +++ b/crates/bevy_render/src/diagnostic/internal.rs @@ -8,14 +8,14 @@ use std::thread::{self, ThreadId}; use bevy_diagnostic::{Diagnostic, DiagnosticMeasurement, DiagnosticPath, DiagnosticsStore}; use bevy_ecs::resource::Resource; use bevy_ecs::system::{Res, ResMut}; -use bevy_platform_support::time::Instant; +use bevy_platform::time::Instant; use std::sync::Mutex; use wgpu::{ Buffer, BufferDescriptor, BufferUsages, CommandEncoder, ComputePass, Features, MapMode, - PipelineStatisticsTypes, QuerySet, QuerySetDescriptor, QueryType, Queue, RenderPass, + PipelineStatisticsTypes, QuerySet, QuerySetDescriptor, QueryType, RenderPass, }; -use crate::renderer::{RenderDevice, WgpuWrapper}; +use crate::renderer::{RenderAdapterInfo, RenderDevice, RenderQueue, WgpuWrapper}; use super::RecordDiagnostics; @@ -32,6 +32,8 @@ struct DiagnosticsRecorderInternal { current_frame: Mutex, submitted_frames: Vec, finished_frames: Vec, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context: tracy_client::GpuContext, } /// Records diagnostics into [`QuerySet`]'s keeping track of the mapping between @@ -41,21 +43,31 @@ pub struct DiagnosticsRecorder(WgpuWrapper); impl DiagnosticsRecorder { /// Creates the new `DiagnosticsRecorder`. - pub fn new(device: &RenderDevice, queue: &Queue) -> DiagnosticsRecorder { + pub fn new( + adapter_info: &RenderAdapterInfo, + device: &RenderDevice, + queue: &RenderQueue, + ) -> DiagnosticsRecorder { let features = device.features(); - let timestamp_period_ns = if features.contains(Features::TIMESTAMP_QUERY) { - queue.get_timestamp_period() - } else { - 0.0 - }; + #[cfg(feature = "tracing-tracy")] + let tracy_gpu_context = + super::tracy_gpu::new_tracy_gpu_context(adapter_info, device, queue); + let _ = adapter_info; // Prevent unused variable warnings when tracing-tracy is not enabled DiagnosticsRecorder(WgpuWrapper::new(DiagnosticsRecorderInternal { - timestamp_period_ns, + timestamp_period_ns: queue.get_timestamp_period(), features, - current_frame: Mutex::new(FrameData::new(device, features)), + current_frame: Mutex::new(FrameData::new( + device, + features, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context.clone(), + )), submitted_frames: Vec::new(), finished_frames: Vec::new(), + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context, })) } @@ -86,7 +98,7 @@ impl DiagnosticsRecorder { /// Copies data from [`QuerySet`]'s to a [`Buffer`], after which it can be downloaded to CPU. /// - /// Should be called before [`DiagnosticsRecorder::finish_frame`] + /// Should be called before [`DiagnosticsRecorder::finish_frame`]. 
pub fn resolve(&mut self, encoder: &mut CommandEncoder) { self.current_frame_mut().resolve(encoder); } @@ -102,6 +114,9 @@ impl DiagnosticsRecorder { device: &RenderDevice, callback: impl FnOnce(RenderDiagnostics) + Send + Sync + 'static, ) { + #[cfg(feature = "tracing-tracy")] + let tracy_gpu_context = self.0.tracy_gpu_context.clone(); + let internal = &mut self.0; internal .current_frame @@ -112,7 +127,12 @@ impl DiagnosticsRecorder { // reuse one of the finished frames, if we can let new_frame = match internal.finished_frames.pop() { Some(frame) => frame, - None => FrameData::new(device, internal.features), + None => FrameData::new( + device, + internal.features, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context, + ), }; let old_frame = core::mem::replace( @@ -169,10 +189,16 @@ struct FrameData { closed_spans: Vec, is_mapped: Arc, callback: Option>, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context: tracy_client::GpuContext, } impl FrameData { - fn new(device: &RenderDevice, features: Features) -> FrameData { + fn new( + device: &RenderDevice, + features: Features, + #[cfg(feature = "tracing-tracy")] tracy_gpu_context: tracy_client::GpuContext, + ) -> FrameData { let wgpu_device = device.wgpu_device(); let mut buffer_size = 0; @@ -237,6 +263,8 @@ impl FrameData { closed_spans: Vec::new(), is_mapped: Arc::new(AtomicBool::new(false)), callback: None, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context, } } @@ -502,6 +530,19 @@ impl FrameData { let end = timestamps[end as usize] as f64; let value = (end - begin) * (timestamp_period_ns as f64) / 1e6; + #[cfg(feature = "tracing-tracy")] + { + // Calling span_alloc() and end_zone() here instead of in open_span() and close_span() means that tracy does not know where each GPU command was recorded on the CPU timeline. + // Unfortunately we must do it this way, because tracy does not play nicely with multithreaded command recording. The start/end pairs would get all mixed up. + // The GPU spans themselves are still accurate though, and it's probably safe to assume that each GPU span in frame N belongs to the corresponding CPU render node span from frame N-1. + let name = &self.path_components[span.path_range.clone()].join("/"); + let mut tracy_gpu_span = + self.tracy_gpu_context.span_alloc(name, "", "", 0).unwrap(); + tracy_gpu_span.end_zone(); + tracy_gpu_span.upload_timestamp_start(begin as i64); + tracy_gpu_span.upload_timestamp_end(end as i64); + } + diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "elapsed_gpu"), suffix: "ms", diff --git a/crates/bevy_render/src/diagnostic/mod.rs b/crates/bevy_render/src/diagnostic/mod.rs index 09b6052c10..7f046036a9 100644 --- a/crates/bevy_render/src/diagnostic/mod.rs +++ b/crates/bevy_render/src/diagnostic/mod.rs @@ -3,13 +3,15 @@ //! For more info, see [`RenderDiagnosticsPlugin`]. pub(crate) mod internal; +#[cfg(feature = "tracing-tracy")] +mod tracy_gpu; use alloc::{borrow::Cow, sync::Arc}; use core::marker::PhantomData; use bevy_app::{App, Plugin, PreUpdate}; -use crate::RenderApp; +use crate::{renderer::RenderAdapterInfo, RenderApp}; use self::internal::{ sync_diagnostics, DiagnosticsRecorder, Pass, RenderDiagnosticsMutex, WriteTimestamp, @@ -20,8 +22,8 @@ use super::{RenderDevice, RenderQueue}; /// Enables collecting render diagnostics, such as CPU/GPU elapsed time per render pass, /// as well as pipeline statistics (number of primitives, number of shader invocations, etc). 
/// -/// To access the diagnostics, you can use [`DiagnosticsStore`](bevy_diagnostic::DiagnosticsStore) resource, -/// or add [`LogDiagnosticsPlugin`](bevy_diagnostic::LogDiagnosticsPlugin). +/// To access the diagnostics, you can use the [`DiagnosticsStore`](bevy_diagnostic::DiagnosticsStore) resource, +/// add [`LogDiagnosticsPlugin`](bevy_diagnostic::LogDiagnosticsPlugin), or use [Tracy](https://github.com/bevyengine/bevy/blob/main/docs/profiling.md#tracy-renderqueue). /// /// To record diagnostics in your own passes: /// 1. First, obtain the diagnostic recorder using [`RenderContext::diagnostic_recorder`](crate::renderer::RenderContext::diagnostic_recorder). @@ -62,9 +64,10 @@ impl Plugin for RenderDiagnosticsPlugin { return; }; + let adapter_info = render_app.world().resource::(); let device = render_app.world().resource::(); let queue = render_app.world().resource::(); - render_app.insert_resource(DiagnosticsRecorder::new(device, queue)); + render_app.insert_resource(DiagnosticsRecorder::new(adapter_info, device, queue)); } } diff --git a/crates/bevy_render/src/diagnostic/tracy_gpu.rs b/crates/bevy_render/src/diagnostic/tracy_gpu.rs new file mode 100644 index 0000000000..c059b8baa5 --- /dev/null +++ b/crates/bevy_render/src/diagnostic/tracy_gpu.rs @@ -0,0 +1,67 @@ +use crate::renderer::{RenderAdapterInfo, RenderDevice, RenderQueue}; +use tracy_client::{Client, GpuContext, GpuContextType}; +use wgpu::{ + Backend, BufferDescriptor, BufferUsages, CommandEncoderDescriptor, Maintain, MapMode, + QuerySetDescriptor, QueryType, QUERY_SIZE, +}; + +pub fn new_tracy_gpu_context( + adapter_info: &RenderAdapterInfo, + device: &RenderDevice, + queue: &RenderQueue, +) -> GpuContext { + let tracy_gpu_backend = match adapter_info.backend { + Backend::Vulkan => GpuContextType::Vulkan, + Backend::Dx12 => GpuContextType::Direct3D12, + Backend::Gl => GpuContextType::OpenGL, + Backend::Metal | Backend::BrowserWebGpu | Backend::Empty => GpuContextType::Invalid, + }; + + let tracy_client = Client::running().unwrap(); + tracy_client + .new_gpu_context( + Some("RenderQueue"), + tracy_gpu_backend, + initial_timestamp(device, queue), + queue.get_timestamp_period(), + ) + .unwrap() +} + +// Code copied from https://github.com/Wumpf/wgpu-profiler/blob/f9de342a62cb75f50904a98d11dd2bbeb40ceab8/src/tracy.rs +fn initial_timestamp(device: &RenderDevice, queue: &RenderQueue) -> i64 { + let query_set = device.wgpu_device().create_query_set(&QuerySetDescriptor { + label: None, + ty: QueryType::Timestamp, + count: 1, + }); + + let resolve_buffer = device.create_buffer(&BufferDescriptor { + label: None, + size: QUERY_SIZE as _, + usage: BufferUsages::QUERY_RESOLVE | BufferUsages::COPY_SRC, + mapped_at_creation: false, + }); + + let map_buffer = device.create_buffer(&BufferDescriptor { + label: None, + size: QUERY_SIZE as _, + usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST, + mapped_at_creation: false, + }); + + let mut timestamp_encoder = device.create_command_encoder(&CommandEncoderDescriptor::default()); + timestamp_encoder.write_timestamp(&query_set, 0); + timestamp_encoder.resolve_query_set(&query_set, 0..1, &resolve_buffer, 0); + // Workaround for https://github.com/gfx-rs/wgpu/issues/6406 + // TODO when that bug is fixed, merge these encoders together again + let mut copy_encoder = device.create_command_encoder(&CommandEncoderDescriptor::default()); + copy_encoder.copy_buffer_to_buffer(&resolve_buffer, 0, &map_buffer, 0, QUERY_SIZE as _); + queue.submit([timestamp_encoder.finish(), copy_encoder.finish()]); + 
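+    // Read the single timestamp back on the CPU below; it is handed to Tracy as
+    // the reference GPU timestamp that later span timestamps are measured against.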
+ map_buffer.slice(..).map_async(MapMode::Read, |_| ()); + device.poll(Maintain::Wait); + + let view = map_buffer.slice(..).get_mapped_range(); + i64::from_le_bytes((*view).try_into().unwrap()) +} diff --git a/crates/bevy_render/src/experimental/occlusion_culling/mesh_preprocess_types.wgsl b/crates/bevy_render/src/experimental/occlusion_culling/mesh_preprocess_types.wgsl index af9deab8b8..a597fb0537 100644 --- a/crates/bevy_render/src/experimental/occlusion_culling/mesh_preprocess_types.wgsl +++ b/crates/bevy_render/src/experimental/occlusion_culling/mesh_preprocess_types.wgsl @@ -15,12 +15,13 @@ struct MeshInput { first_index_index: u32, index_count: u32, current_skin_index: u32, - previous_skin_index: u32, // Low 16 bits: index of the material inside the bind group data. // High 16 bits: index of the lightmap in the binding array. material_and_lightmap_bind_group_slot: u32, - pad_a: u32, - pad_b: u32, + timestamp: u32, + // User supplied index to identify the mesh instance + tag: u32, + pad: u32, } // The `wgpu` indirect parameters structure. This is a union of two structures. @@ -46,17 +47,20 @@ struct IndirectParametersNonIndexed { first_instance: u32, } -struct IndirectParametersMetadata { - mesh_index: u32, +struct IndirectParametersCpuMetadata { base_output_index: u32, batch_set_index: u32, +} + +struct IndirectParametersGpuMetadata { + mesh_index: u32, #ifdef WRITE_INDIRECT_PARAMETERS_METADATA early_instance_count: atomic, late_instance_count: atomic, -#else +#else // WRITE_INDIRECT_PARAMETERS_METADATA early_instance_count: u32, late_instance_count: u32, -#endif +#endif // WRITE_INDIRECT_PARAMETERS_METADATA } struct IndirectBatchSet { diff --git a/crates/bevy_render/src/experimental/occlusion_culling/mod.rs b/crates/bevy_render/src/experimental/occlusion_culling/mod.rs index 82f299d3a0..a3b067e19f 100644 --- a/crates/bevy_render/src/experimental/occlusion_culling/mod.rs +++ b/crates/bevy_render/src/experimental/occlusion_culling/mod.rs @@ -5,10 +5,13 @@ use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, weak_handle, Handle}; -use bevy_ecs::{component::Component, prelude::ReflectComponent}; +use bevy_ecs::{component::Component, entity::Entity, prelude::ReflectComponent}; use bevy_reflect::{prelude::ReflectDefault, Reflect}; -use crate::{extract_component::ExtractComponent, render_resource::Shader}; +use crate::{ + extract_component::ExtractComponent, + render_resource::{Shader, TextureView}, +}; /// The handle to the `mesh_preprocess_types.wgsl` compute shader. pub const MESH_PREPROCESS_TYPES_SHADER_HANDLE: Handle = @@ -83,5 +86,31 @@ impl Plugin for OcclusionCullingPlugin { /// [*two-phase occlusion culling*]: /// https://medium.com/@mil_kru/two-pass-occlusion-culling-4100edcad501 #[derive(Component, ExtractComponent, Clone, Copy, Default, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct OcclusionCulling; + +/// A render-world component that contains resources necessary to perform +/// occlusion culling on any view other than a camera. +/// +/// Bevy automatically places this component on views created for shadow +/// mapping. You don't ordinarily need to add this component yourself. +#[derive(Clone, Component)] +pub struct OcclusionCullingSubview { + /// A texture view of the Z-buffer. + pub depth_texture_view: TextureView, + /// The size of the texture along both dimensions. 
+ /// + /// Because [`OcclusionCullingSubview`] is only currently used for shadow + /// maps, they're guaranteed to have sizes equal to a power of two, so we + /// don't have to store the two dimensions individually here. + pub depth_texture_size: u32, +} + +/// A render-world component placed on each camera that stores references to all +/// entities other than cameras that need occlusion culling. +/// +/// Bevy automatically places this component on cameras that are drawing +/// shadows, when those shadows come from lights with occlusion culling enabled. +/// You don't ordinarily need to add this component yourself. +#[derive(Clone, Component)] +pub struct OcclusionCullingSubviewEntities(pub Vec); diff --git a/crates/bevy_render/src/extract_component.rs b/crates/bevy_render/src/extract_component.rs index f771998428..19d15a2b86 100644 --- a/crates/bevy_render/src/extract_component.rs +++ b/crates/bevy_render/src/extract_component.rs @@ -154,7 +154,7 @@ fn prepare_uniform_components( ) }) .collect::>(); - commands.insert_or_spawn_batch(entities); + commands.try_insert_batch(entities); } /// This plugin extracts the components into the render world for synced entities. @@ -212,7 +212,7 @@ fn extract_components( } } *previous_len = values.len(); - commands.insert_or_spawn_batch(values); + commands.try_insert_batch(values); } /// This system extracts all components of the corresponding [`ExtractComponent`], for entities that are visible and synced via [`crate::sync_world::SyncToRenderWorld`]. @@ -232,5 +232,5 @@ fn extract_visible_components( } } *previous_len = values.len(); - commands.insert_or_spawn_batch(values); + commands.try_insert_batch(values); } diff --git a/crates/bevy_render/src/extract_param.rs b/crates/bevy_render/src/extract_param.rs index 6ac7079bc5..f543098474 100644 --- a/crates/bevy_render/src/extract_param.rs +++ b/crates/bevy_render/src/extract_param.rs @@ -2,7 +2,10 @@ use crate::MainWorld; use bevy_ecs::{ component::Tick, prelude::*, - system::{ReadOnlySystemParam, SystemMeta, SystemParam, SystemParamItem, SystemState}, + system::{ + ReadOnlySystemParam, SystemMeta, SystemParam, SystemParamItem, SystemParamValidationError, + SystemState, + }, world::unsafe_world_cell::UnsafeWorldCell, }; use core::ops::{Deref, DerefMut}; @@ -79,14 +82,15 @@ where #[inline] unsafe fn validate_param( state: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to world data registered in `init_state`. let result = unsafe { world.get_resource_by_id(state.main_world_state) }; let Some(main_world) = result else { - system_meta.try_warn_param::<&World>(); - return false; + return Err(SystemParamValidationError::invalid::( + "`MainWorld` resource does not exist", + )); }; // SAFETY: Type is guaranteed by `SystemState`. let main_world: &World = unsafe { main_world.deref() }; diff --git a/crates/bevy_render/src/globals.rs b/crates/bevy_render/src/globals.rs index b21b37ab8d..c05d96c4c9 100644 --- a/crates/bevy_render/src/globals.rs +++ b/crates/bevy_render/src/globals.rs @@ -46,7 +46,7 @@ fn extract_time(mut commands: Commands, time: Extract>) { /// Contains global values useful when writing shaders. /// Currently only contains values related to time. #[derive(Default, Clone, Resource, ExtractResource, Reflect, ShaderType)] -#[reflect(Resource, Default)] +#[reflect(Resource, Default, Clone)] pub struct GlobalsUniform { /// The time since startup in seconds. 
/// Wraps to 0 after 1 hour. diff --git a/crates/bevy_render/src/gpu_component_array_buffer.rs b/crates/bevy_render/src/gpu_component_array_buffer.rs index ac0f471a4a..b3f78f5bfb 100644 --- a/crates/bevy_render/src/gpu_component_array_buffer.rs +++ b/crates/bevy_render/src/gpu_component_array_buffer.rs @@ -6,7 +6,7 @@ use crate::{ use bevy_app::{App, Plugin}; use bevy_ecs::{ prelude::{Component, Entity}, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Commands, Query, Res, ResMut}, }; use core::marker::PhantomData; @@ -53,7 +53,7 @@ fn prepare_gpu_component_array_buffers( .iter() .map(|(entity, component)| (entity, gpu_array_buffer.push(component.clone()))) .collect::>(); - commands.insert_or_spawn_batch(entities); + commands.try_insert_batch(entities); gpu_array_buffer.write_buffer(&render_device, &render_queue); } diff --git a/crates/bevy_render/src/gpu_readback.rs b/crates/bevy_render/src/gpu_readback.rs index 012ec1a6cb..02f0c2d1db 100644 --- a/crates/bevy_render/src/gpu_readback.rs +++ b/crates/bevy_render/src/gpu_readback.rs @@ -15,7 +15,7 @@ use async_channel::{Receiver, Sender}; use bevy_app::{App, Plugin}; use bevy_asset::Handle; use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::schedule::IntoSystemConfigs; +use bevy_ecs::schedule::IntoScheduleConfigs; use bevy_ecs::{ change_detection::ResMut, entity::Entity, @@ -24,7 +24,7 @@ use bevy_ecs::{ system::{Query, Res}, }; use bevy_image::{Image, TextureFormatPixelInfo}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::Reflect; use bevy_render_macros::ExtractComponent; use encase::internal::ReadFrom; diff --git a/crates/bevy_render/src/lib.rs b/crates/bevy_render/src/lib.rs index 050da078dc..843bb68284 100644 --- a/crates/bevy_render/src/lib.rs +++ b/crates/bevy_render/src/lib.rs @@ -78,9 +78,11 @@ pub use extract_param::Extract; use bevy_window::{PrimaryWindow, RawHandleWrapperHolder}; use experimental::occlusion_culling::OcclusionCullingPlugin; -use extract_resource::ExtractResourcePlugin; use globals::GlobalsPlugin; -use render_asset::RenderAssetBytesPerFrame; +use render_asset::{ + extract_render_asset_bytes_per_frame, reset_render_asset_bytes_per_frame, + RenderAssetBytesPerFrame, RenderAssetBytesPerFrameLimiter, +}; use renderer::{RenderAdapter, RenderDevice, RenderQueue}; use settings::RenderResources; use sync_world::{ @@ -102,6 +104,7 @@ use alloc::sync::Arc; use bevy_app::{App, AppLabel, Plugin, SubApp}; use bevy_asset::{load_internal_asset, weak_handle, AssetApp, AssetServer, Handle}; use bevy_ecs::{prelude::*, schedule::ScheduleLabel}; +use bitflags::bitflags; use core::ops::{Deref, DerefMut}; use std::sync::Mutex; use tracing::debug; @@ -120,12 +123,21 @@ pub struct RenderPlugin { /// If `true`, disables asynchronous pipeline compilation. /// This has no effect on macOS, Wasm, iOS, or without the `multi_threaded` feature. pub synchronous_pipeline_compilation: bool, - /// If true, this sets the `COPY_SRC` flag on indirect draw parameters so - /// that they can be read back to CPU. - /// - /// This is a debugging feature that may reduce performance. It primarily - /// exists for the `occlusion_culling` example. - pub allow_copies_from_indirect_parameters: bool, + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} + +bitflags! { + /// Debugging flags that can optionally be set when constructing the renderer. 
+ #[derive(Clone, Copy, PartialEq, Default, Debug)] + pub struct RenderDebugFlags: u8 { + /// If true, this sets the `COPY_SRC` flag on indirect draw parameters + /// so that they can be read back to CPU. + /// + /// This is a debugging feature that may reduce performance. It + /// primarily exists for the `occlusion_culling` example. + const ALLOW_COPIES_FROM_INDIRECT_PARAMETERS = 1; + } } /// The systems sets of the default [`App`] rendering schedule. @@ -146,6 +158,9 @@ pub enum RenderSet { Queue, /// A sub-set within [`Queue`](RenderSet::Queue) where mesh entity queue systems are executed. Ensures `prepare_assets::` is completed. QueueMeshes, + /// A sub-set within [`Queue`](RenderSet::Queue) where meshes that have + /// become invisible or changed phases are removed from the bins. + QueueSweep, // TODO: This could probably be moved in favor of a system ordering // abstraction in `Render` or `Queue` /// Sort the [`SortedRenderPhase`](render_phase::SortedRenderPhase)s and @@ -156,6 +171,9 @@ pub enum RenderSet { Prepare, /// A sub-set within [`Prepare`](RenderSet::Prepare) for initializing buffers, textures and uniforms for use in bind groups. PrepareResources, + /// Collect phase buffers after + /// [`PrepareResources`](RenderSet::PrepareResources) has run. + PrepareResourcesCollectPhaseBuffers, /// Flush buffers after [`PrepareResources`](RenderSet::PrepareResources), but before [`PrepareBindGroups`](RenderSet::PrepareBindGroups). PrepareResourcesFlush, /// A sub-set within [`Prepare`](RenderSet::Prepare) for constructing bind groups, or other data that relies on render resources prepared in [`PrepareResources`](RenderSet::PrepareResources). @@ -172,7 +190,7 @@ pub enum RenderSet { } /// The main render schedule. -#[derive(ScheduleLabel, Debug, Hash, PartialEq, Eq, Clone)] +#[derive(ScheduleLabel, Debug, Hash, PartialEq, Eq, Clone, Default)] pub struct Render; impl Render { @@ -201,12 +219,18 @@ impl Render { schedule.configure_sets((ExtractCommands, PrepareAssets, PrepareMeshes, Prepare).chain()); schedule.configure_sets( - QueueMeshes + (QueueMeshes, QueueSweep) + .chain() .in_set(Queue) .after(prepare_assets::), ); schedule.configure_sets( - (PrepareResources, PrepareResourcesFlush, PrepareBindGroups) + ( + PrepareResources, + PrepareResourcesCollectPhaseBuffers, + PrepareResourcesFlush, + PrepareBindGroups, + ) .chain() .in_set(Prepare), ); @@ -222,7 +246,7 @@ impl Render { /// /// This schedule is run on the main world, but its buffers are not applied /// until it is returned to the render world. -#[derive(ScheduleLabel, PartialEq, Eq, Debug, Clone, Hash)] +#[derive(ScheduleLabel, PartialEq, Eq, Debug, Clone, Hash, Default)] pub struct ExtractSchedule; /// The simulation [`World`] of the application, stored as a resource. @@ -261,12 +285,12 @@ struct FutureRenderResources(Arc>>); #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, AppLabel)] pub struct RenderApp; -pub const INSTANCE_INDEX_SHADER_HANDLE: Handle = - weak_handle!("475c76aa-4afd-4a6b-9878-1fc1e2f41216"); pub const MATHS_SHADER_HANDLE: Handle = weak_handle!("d94d70d4-746d-49c4-bfc3-27d63f2acda0"); pub const COLOR_OPERATIONS_SHADER_HANDLE: Handle = weak_handle!("33a80b2f-aaf7-4c86-b828-e7ae83b72f1a"); +pub const BINDLESS_SHADER_HANDLE: Handle = + weak_handle!("13f1baaa-41bf-448e-929e-258f9307a522"); impl Plugin for RenderPlugin { /// Initializes the renderer, sets up the [`RenderSet`] and creates the rendering sub-app. 
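// A minimal sketch of opting into the debug flag defined above when the render
// plugin is constructed (the surrounding app-building code is assumed; only the
// `debug_flags` field comes from this patch):
//
//     RenderPlugin {
//         debug_flags: RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS,
//         ..Default::default()
//     }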
@@ -293,7 +317,7 @@ impl Plugin for RenderPlugin { let primary_window = app .world_mut() .query_filtered::<&RawHandleWrapperHolder, With>() - .get_single(app.world()) + .single(app.world()) .ok() .cloned(); let settings = render_creation.clone(); @@ -376,16 +400,26 @@ impl Plugin for RenderPlugin { GlobalsPlugin, MorphPlugin, BatchingPlugin { - allow_copies_from_indirect_parameters: self.allow_copies_from_indirect_parameters, + debug_flags: self.debug_flags, }, SyncWorldPlugin, StoragePlugin, GpuReadbackPlugin::default(), OcclusionCullingPlugin, + #[cfg(feature = "tracing-tracy")] + diagnostic::RenderDiagnosticsPlugin, )); - app.init_resource::() - .add_plugins(ExtractResourcePlugin::::default()); + app.init_resource::(); + if let Some(render_app) = app.get_sub_app_mut(RenderApp) { + render_app.init_resource::(); + render_app + .add_systems(ExtractSchedule, extract_render_asset_bytes_per_frame) + .add_systems( + Render, + reset_render_asset_bytes_per_frame.in_set(RenderSet::Cleanup), + ); + } app.register_type::() // These types cannot be registered in bevy_color, as it does not depend on the rest of Bevy @@ -412,6 +446,12 @@ impl Plugin for RenderPlugin { "color_operations.wgsl", Shader::from_wgsl ); + load_internal_asset!( + app, + BINDLESS_SHADER_HANDLE, + "bindless.wgsl", + Shader::from_wgsl + ); if let Some(future_render_resources) = app.world_mut().remove_resource::() { @@ -435,14 +475,7 @@ impl Plugin for RenderPlugin { .insert_resource(device) .insert_resource(queue) .insert_resource(render_adapter) - .insert_resource(adapter_info) - .add_systems( - Render, - (|mut bpf: ResMut| { - bpf.reset(); - }) - .in_set(RenderSet::Cleanup), - ); + .insert_resource(adapter_info); } } } @@ -551,3 +584,26 @@ pub fn get_adreno_model(adapter: &RenderAdapter) -> Option { .fold(0, |acc, digit| acc * 10 + digit), ) } + +/// Get the Mali driver version if the adapter is a Mali GPU. +pub fn get_mali_driver_version(adapter: &RenderAdapter) -> Option { + if !cfg!(target_os = "android") { + return None; + } + + let driver_name = adapter.get_info().name; + if !driver_name.contains("Mali") { + return None; + } + let driver_info = adapter.get_info().driver_info; + if let Some(start_pos) = driver_info.find("v1.r") { + if let Some(end_pos) = driver_info[start_pos..].find('p') { + let start_idx = start_pos + 4; // Skip "v1.r" + let end_idx = start_pos + end_pos; + + return driver_info[start_idx..end_idx].parse::().ok(); + } + } + + None +} diff --git a/crates/bevy_render/src/mesh/allocator.rs b/crates/bevy_render/src/mesh/allocator.rs index b703792b65..bc638859ed 100644 --- a/crates/bevy_render/src/mesh/allocator.rs +++ b/crates/bevy_render/src/mesh/allocator.rs @@ -12,11 +12,11 @@ use bevy_asset::AssetId; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ resource::Resource, - schedule::IntoSystemConfigs as _, + schedule::IntoScheduleConfigs as _, system::{Res, ResMut}, world::{FromWorld, World}, }; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; use bevy_utils::default; use offset_allocator::{Allocation, Allocator}; use tracing::error; @@ -196,7 +196,7 @@ struct GeneralSlab { element_layout: ElementLayout, /// The size of this slab in slots. - slot_capacity: u32, + current_slot_capacity: u32, } /// A slab that contains a single object. @@ -224,6 +224,18 @@ enum ElementClass { Index, } +/// The results of [`GeneralSlab::grow_if_necessary`]. 
+enum SlabGrowthResult { + /// The mesh data already fits in the slab; the slab doesn't need to grow. + NoGrowthNeeded, + /// The slab needed to grow. + /// + /// The [`SlabToReallocate`] contains the old capacity of the slab. + NeededGrowth(SlabToReallocate), + /// The slab wanted to grow but couldn't because it hit its maximum size. + CantGrow, +} + /// Information about the size of individual elements (vertices or indices) /// within a slab. /// @@ -278,9 +290,8 @@ struct SlabsToReallocate(HashMap); /// reallocated. #[derive(Default)] struct SlabToReallocate { - /// Maps all allocations that need to be relocated to their positions within - /// the *new* slab. - allocations_to_copy: HashMap, SlabAllocation>, + /// The capacity of the slab before we decided to grow it. + old_slot_capacity: u32, } impl Display for SlabId { @@ -347,7 +358,10 @@ pub fn allocate_and_free_meshes( render_device: Res, render_queue: Res, ) { - // Process newly-added meshes. + // Process removed or modified meshes. + mesh_allocator.free_meshes(&extracted_meshes); + + // Process newly-added or modified meshes. mesh_allocator.allocate_meshes( &mesh_allocator_settings, &extracted_meshes, @@ -355,9 +369,6 @@ pub fn allocate_and_free_meshes( &render_device, &render_queue, ); - - // Process removed meshes. - mesh_allocator.free_meshes(&extracted_meshes); } impl MeshAllocator { @@ -398,7 +409,7 @@ impl MeshAllocator { slab_id: SlabId, ) -> Option { match self.slabs.get(&slab_id)? { - Slab::General(ref general_slab) => { + Slab::General(general_slab) => { let slab_allocation = general_slab.resident_allocations.get(mesh_id)?; Some(MeshBufferSlice { buffer: general_slab.buffer.as_ref()?, @@ -409,7 +420,7 @@ impl MeshAllocator { }) } - Slab::LargeObject(ref large_object_slab) => { + Slab::LargeObject(large_object_slab) => { let buffer = large_object_slab.buffer.as_ref()?; Some(MeshBufferSlice { buffer, @@ -544,7 +555,7 @@ impl MeshAllocator { match *slab { Slab::General(ref mut general_slab) => { - let (Some(ref buffer), Some(allocated_range)) = ( + let (Some(buffer), Some(allocated_range)) = ( &general_slab.buffer, general_slab.pending_allocations.remove(mesh_id), ) else { @@ -596,9 +607,17 @@ impl MeshAllocator { } } + /// Frees allocations for meshes that were removed or modified this frame. fn free_meshes(&mut self, extracted_meshes: &ExtractedAssets) { let mut empty_slabs = >::default(); - for mesh_id in &extracted_meshes.removed { + + // TODO: Consider explicitly reusing allocations for changed meshes of the same size + let meshes_to_free = extracted_meshes + .removed + .iter() + .chain(extracted_meshes.modified.iter()); + + for mesh_id in meshes_to_free { if let Some(slab_id) = self.mesh_id_to_vertex_slab.remove(mesh_id) { self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs); } @@ -694,32 +713,39 @@ impl MeshAllocator { // and try to allocate the mesh inside them. We go with the first one // that succeeds. 
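        // Note on the reworked flow below: the slab's `Allocator` is now created
        // with the slab's maximum slot capacity (see `GeneralSlab::new`), so
        // `allocate` can hand out a range beyond the current GPU buffer.
        // `grow_if_necessary` then only has to enlarge the backing buffer;
        // existing allocations keep their offsets and are copied over verbatim in
        // `reallocate_slab`.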
let mut mesh_allocation = None; - 'slab: for &slab_id in &*candidate_slabs { - loop { - let Some(Slab::General(ref mut slab)) = self.slabs.get_mut(&slab_id) else { - unreachable!("Slab not found") - }; + for &slab_id in &*candidate_slabs { + let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else { + unreachable!("Slab not found") + }; - if let Some(allocation) = slab.allocator.allocate(data_slot_count) { - mesh_allocation = Some(MeshAllocation { - slab_id, - slab_allocation: SlabAllocation { - allocation, - slot_count: data_slot_count, - }, - }); - break 'slab; - } + let Some(allocation) = slab.allocator.allocate(data_slot_count) else { + continue; + }; - // Try to grow the slab. If this fails, the slab is full; go on - // to the next slab. - match slab.try_grow(settings) { - Ok(new_mesh_allocation_records) => { - slabs_to_grow.insert(slab_id, new_mesh_allocation_records); + // Try to fit the object in the slab, growing if necessary. + match slab.grow_if_necessary(allocation.offset + data_slot_count, settings) { + SlabGrowthResult::NoGrowthNeeded => {} + SlabGrowthResult::NeededGrowth(slab_to_reallocate) => { + // If we already grew the slab this frame, don't replace the + // `SlabToReallocate` entry. We want to keep the entry + // corresponding to the size that the slab had at the start + // of the frame, so that we can copy only the used portion + // of the initial buffer to the new one. + if let Entry::Vacant(vacant_entry) = slabs_to_grow.entry(slab_id) { + vacant_entry.insert(slab_to_reallocate); } - Err(()) => continue 'slab, } + SlabGrowthResult::CantGrow => continue, } + + mesh_allocation = Some(MeshAllocation { + slab_id, + slab_allocation: SlabAllocation { + allocation, + slot_count: data_slot_count, + }, + }); + break; } // If we still have no allocation, make a new slab. @@ -745,9 +771,7 @@ impl MeshAllocator { // Mark the allocation as pending. Don't copy it in just yet; further // meshes loaded this frame may result in its final allocation location // changing. - if let Some(Slab::General(ref mut general_slab)) = - self.slabs.get_mut(&mesh_allocation.slab_id) - { + if let Some(Slab::General(general_slab)) = self.slabs.get_mut(&mesh_allocation.slab_id) { general_slab .pending_allocations .insert(*mesh_id, mesh_allocation.slab_allocation); @@ -774,10 +798,11 @@ impl MeshAllocator { /// Reallocates a slab that needs to be resized, or allocates a new slab. /// - /// This performs the actual growth operation that [`GeneralSlab::try_grow`] - /// scheduled. We do the growth in two phases so that, if a slab grows - /// multiple times in the same frame, only one new buffer is reallocated, - /// rather than reallocating the buffer multiple times. + /// This performs the actual growth operation that + /// [`GeneralSlab::grow_if_necessary`] scheduled. We do the growth in two + /// phases so that, if a slab grows multiple times in the same frame, only + /// one new buffer is reallocated, rather than reallocating the buffer + /// multiple times. fn reallocate_slab( &mut self, render_device: &RenderDevice, @@ -805,38 +830,28 @@ impl MeshAllocator { slab_id, buffer_usages_to_str(buffer_usages) )), - size: slab.slot_capacity as u64 * slab.element_layout.slot_size(), + size: slab.current_slot_capacity as u64 * slab.element_layout.slot_size(), usage: buffer_usages, mapped_at_creation: false, }); slab.buffer = Some(new_buffer.clone()); + let Some(old_buffer) = old_buffer else { return }; + // In order to do buffer copies, we need a command encoder. 
let mut encoder = render_device.create_command_encoder(&CommandEncoderDescriptor { label: Some("slab resize encoder"), }); - // If we have no objects to copy over, we're done. - let Some(old_buffer) = old_buffer else { - return; - }; - - for (mesh_id, src_slab_allocation) in &mut slab.resident_allocations { - let Some(dest_slab_allocation) = slab_to_grow.allocations_to_copy.get(mesh_id) else { - continue; - }; - - encoder.copy_buffer_to_buffer( - &old_buffer, - src_slab_allocation.allocation.offset as u64 * slab.element_layout.slot_size(), - &new_buffer, - dest_slab_allocation.allocation.offset as u64 * slab.element_layout.slot_size(), - dest_slab_allocation.slot_count as u64 * slab.element_layout.slot_size(), - ); - // Now that we've done the copy, we can update the allocation record. - *src_slab_allocation = dest_slab_allocation.clone(); - } + // Copy the data from the old buffer into the new one. + encoder.copy_buffer_to_buffer( + &old_buffer, + 0, + &new_buffer, + 0, + slab_to_grow.old_slot_capacity as u64 * slab.element_layout.slot_size(), + ); let command_buffer = encoder.finish(); render_queue.submit([command_buffer]); @@ -872,16 +887,19 @@ impl GeneralSlab { layout: ElementLayout, data_slot_count: u32, ) -> GeneralSlab { - let slab_slot_capacity = (settings.min_slab_size.div_ceil(layout.slot_size()) as u32) + let initial_slab_slot_capacity = (settings.min_slab_size.div_ceil(layout.slot_size()) + as u32) + .max(offset_allocator::ext::min_allocator_size(data_slot_count)); + let max_slab_slot_capacity = (settings.max_slab_size.div_ceil(layout.slot_size()) as u32) .max(offset_allocator::ext::min_allocator_size(data_slot_count)); let mut new_slab = GeneralSlab { - allocator: Allocator::new(slab_slot_capacity), + allocator: Allocator::new(max_slab_slot_capacity), buffer: None, resident_allocations: HashMap::default(), pending_allocations: HashMap::default(), element_layout: layout, - slot_capacity: slab_slot_capacity, + current_slot_capacity: initial_slab_slot_capacity, }; // This should never fail. @@ -898,68 +916,40 @@ impl GeneralSlab { new_slab } - /// Attempts to grow a slab that's just run out of space. + /// Checks to see if the size of this slab is at least `new_size_in_slots` + /// and grows the slab if it isn't. /// - /// Returns a structure the allocations that need to be relocated if the - /// growth succeeded. If the slab is full, returns `Err`. - fn try_grow(&mut self, settings: &MeshAllocatorSettings) -> Result { - // In extremely rare cases due to allocator fragmentation, it may happen - // that we fail to re-insert every object that was in the slab after - // growing it. Even though this will likely never happen, we use this - // loop to handle this unlikely event properly if it does. - 'grow: loop { - let new_slab_slot_capacity = ((self.slot_capacity as f64 * settings.growth_factor) - .ceil() as u32) - .min((settings.max_slab_size / self.element_layout.slot_size()) as u32); - if new_slab_slot_capacity == self.slot_capacity { - // The slab is full. - return Err(()); - } - - // Grow the slab. - self.allocator = Allocator::new(new_slab_slot_capacity); - self.slot_capacity = new_slab_slot_capacity; - - let mut slab_to_grow = SlabToReallocate::default(); - - // Place every resident allocation that was in the old slab in the - // new slab. 
- for (allocated_mesh_id, old_allocation_range) in &self.resident_allocations { - let allocation_size = old_allocation_range.slot_count; - match self.allocator.allocate(allocation_size) { - Some(allocation) => { - slab_to_grow.allocations_to_copy.insert( - *allocated_mesh_id, - SlabAllocation { - allocation, - slot_count: allocation_size, - }, - ); - } - None => { - // We failed to insert one of the allocations that we - // had before. - continue 'grow; - } - } - } - - // Move every allocation that was pending in the old slab to the new - // slab. - for slab_allocation in self.pending_allocations.values_mut() { - let allocation_size = slab_allocation.slot_count; - match self.allocator.allocate(allocation_size) { - Some(allocation) => slab_allocation.allocation = allocation, - None => { - // We failed to insert one of the allocations that we - // had before. - continue 'grow; - } - } - } - - return Ok(slab_to_grow); + /// The returned [`SlabGrowthResult`] describes whether the slab needed to + /// grow and whether, if so, it was successful in doing so. + fn grow_if_necessary( + &mut self, + new_size_in_slots: u32, + settings: &MeshAllocatorSettings, + ) -> SlabGrowthResult { + // Is the slab big enough already? + let initial_slot_capacity = self.current_slot_capacity; + if self.current_slot_capacity >= new_size_in_slots { + return SlabGrowthResult::NoGrowthNeeded; } + + // Try to grow in increments of `MeshAllocatorSettings::growth_factor` + // until we're big enough. + while self.current_slot_capacity < new_size_in_slots { + let new_slab_slot_capacity = + ((self.current_slot_capacity as f64 * settings.growth_factor).ceil() as u32) + .min((settings.max_slab_size / self.element_layout.slot_size()) as u32); + if new_slab_slot_capacity == self.current_slot_capacity { + // The slab is full. + return SlabGrowthResult::CantGrow; + } + + self.current_slot_capacity = new_slab_slot_capacity; + } + + // Tell our caller what we did. 
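        // Worked example (settings values assumed, for illustration): with a
        // growth factor of 2.0 and a current capacity of 1000 slots, a request
        // for 3500 slots grows the capacity 1000 -> 2000 -> 4000 and returns
        // `SlabGrowthResult::NeededGrowth(SlabToReallocate { old_slot_capacity: 1000 })`.
        // If the `max_slab_size` cap had been hit first, `CantGrow` would be
        // returned instead.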
+ SlabGrowthResult::NeededGrowth(SlabToReallocate { + old_slot_capacity: initial_slot_capacity, + }) } } diff --git a/crates/bevy_render/src/mesh/components.rs b/crates/bevy_render/src/mesh/components.rs index 7c0d87cf51..000de324e3 100644 --- a/crates/bevy_render/src/mesh/components.rs +++ b/crates/bevy_render/src/mesh/components.rs @@ -5,10 +5,10 @@ use crate::{ use bevy_asset::{AsAssetId, AssetEvent, AssetId, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - change_detection::DetectChangesMut, component::Component, event::EventReader, prelude::require, + change_detection::DetectChangesMut, component::Component, event::EventReader, reflect::ReflectComponent, system::Query, }; -use bevy_platform_support::{collections::HashSet, hash::FixedHasher}; +use bevy_platform::{collections::HashSet, hash::FixedHasher}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_transform::components::Transform; use derive_more::derive::From; @@ -41,7 +41,7 @@ use derive_more::derive::From; /// } /// ``` #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone, PartialEq)] #[require(Transform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct Mesh2d(pub Handle); @@ -97,7 +97,7 @@ impl AsAssetId for Mesh2d { /// } /// ``` #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone, PartialEq)] #[require(Transform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct Mesh3d(pub Handle); @@ -150,3 +150,8 @@ pub fn mark_3d_meshes_as_changed_if_their_assets_changed( } } } + +/// A component that stores an arbitrary index used to identify the mesh instance when rendering. +#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)] +#[reflect(Component, Default, Clone, PartialEq)] +pub struct MeshTag(pub u32); diff --git a/crates/bevy_render/src/mesh/mod.rs b/crates/bevy_render/src/mesh/mod.rs index cb89aaf6bd..fbd530c14d 100644 --- a/crates/bevy_render/src/mesh/mod.rs +++ b/crates/bevy_render/src/mesh/mod.rs @@ -21,7 +21,7 @@ use bevy_ecs::{ SystemParamItem, }, }; -pub use components::{mark_3d_meshes_as_changed_if_their_assets_changed, Mesh2d, Mesh3d}; +pub use components::{mark_3d_meshes_as_changed_if_their_assets_changed, Mesh2d, Mesh3d, MeshTag}; use wgpu::IndexFormat; /// Registers all [`MeshBuilder`] types. @@ -208,7 +208,7 @@ impl RenderAsset for RenderMesh { fn prepare_asset( mesh: Self::SourceAsset, _: AssetId, - (images, ref mut mesh_vertex_buffer_layouts): &mut SystemParamItem, + (images, mesh_vertex_buffer_layouts): &mut SystemParamItem, ) -> Result> { let morph_targets = match mesh.morph_targets() { Some(mt) => { diff --git a/crates/bevy_render/src/pipelined_rendering.rs b/crates/bevy_render/src/pipelined_rendering.rs index ed54a6301a..fb665e469d 100644 --- a/crates/bevy_render/src/pipelined_rendering.rs +++ b/crates/bevy_render/src/pipelined_rendering.rs @@ -92,14 +92,14 @@ impl Drop for RenderAppChannels { /// ``` /// /// - `sync` is the step where the entity-entity mapping between the main and render world is updated. -/// This is run on the main app's thread. For more information checkout [`SyncWorldPlugin`]. +/// This is run on the main app's thread. For more information checkout [`SyncWorldPlugin`]. 
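The new `MeshTag` component introduced above is a plain `u32` newtype attached alongside a mesh to identify that instance to the renderer. A short usage sketch; the cube asset is placeholder setup, and the import path assumes the usual `bevy` facade re-export of `bevy_render::mesh`:

```rust
use bevy::prelude::*;
use bevy::render::mesh::MeshTag;

// Spawn a mesh instance carrying an arbitrary per-instance index.
fn spawn_tagged_mesh(mut commands: Commands, mut meshes: ResMut<Assets<Mesh>>) {
    commands.spawn((
        Mesh3d(meshes.add(Cuboid::default())),
        MeshTag(42),
    ));
}
```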
/// - `extract` is the step where data is copied from the main world to the render world. -/// This is run on the main app's thread. +/// This is run on the main app's thread. /// - On the render thread, we first apply the `extract commands`. This is not run during extract, so the -/// main schedule can start sooner. +/// main schedule can start sooner. /// - Then the `rendering schedule` is run. See [`RenderSet`](crate::RenderSet) for the standard steps in this process. /// - In parallel to the rendering thread the [`RenderExtractApp`] schedule runs. By -/// default, this schedule is empty. But it is useful if you need something to run before I/O processing. +/// default, this schedule is empty. But it is useful if you need something to run before I/O processing. /// - Next all the `winit events` are processed. /// - And finally the `main app schedule` is run. /// - Once both the `main app schedule` and the `render schedule` are finished running, `extract` is run again. diff --git a/crates/bevy_render/src/primitives/mod.rs b/crates/bevy_render/src/primitives/mod.rs index 9123e95b5e..ca664fc338 100644 --- a/crates/bevy_render/src/primitives/mod.rs +++ b/crates/bevy_render/src/primitives/mod.rs @@ -1,13 +1,13 @@ use core::borrow::Borrow; -use bevy_ecs::{component::Component, entity::hash_map::EntityHashMap, reflect::ReflectComponent}; +use bevy_ecs::{component::Component, entity::EntityHashMap, reflect::ReflectComponent}; use bevy_math::{Affine3A, Mat3A, Mat4, Vec3, Vec3A, Vec4, Vec4Swizzles}; use bevy_reflect::prelude::*; /// An axis-aligned bounding box, defined by: /// - a center, /// - the distances from the center to each faces along the axis, -/// the faces are orthogonal to the axis. +/// the faces are orthogonal to the axis. /// /// It is typically used as a component on an entity to represent the local space /// occupied by this entity, with faces orthogonal to its local axis. @@ -18,7 +18,7 @@ use bevy_reflect::prelude::*; /// /// It will be added automatically by the systems in [`CalculateBounds`] to entities that: /// - could be subject to frustum culling, for example with a [`Mesh3d`] -/// or `Sprite` component, +/// or `Sprite` component, /// - don't have the [`NoFrustumCulling`] component. 
/// /// It won't be updated automatically if the space occupied by the entity changes, @@ -29,7 +29,7 @@ use bevy_reflect::prelude::*; /// [`CalculateBounds`]: crate::view::visibility::VisibilitySystems::CalculateBounds /// [`Mesh3d`]: crate::mesh::Mesh #[derive(Component, Clone, Copy, Debug, Default, Reflect, PartialEq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct Aabb { pub center: Vec3A, pub half_extents: Vec3A, @@ -224,9 +224,9 @@ impl HalfSpace { /// [`CameraProjection`]: crate::camera::CameraProjection /// [`GlobalTransform`]: bevy_transform::components::GlobalTransform #[derive(Component, Clone, Copy, Debug, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct Frustum { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub half_spaces: [HalfSpace; 6], } @@ -327,9 +327,9 @@ impl Frustum { } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct CubemapFrusta { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub frusta: [Frustum; 6], } @@ -343,9 +343,9 @@ impl CubemapFrusta { } #[derive(Component, Debug, Default, Reflect, Clone)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct CascadesFrusta { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub frusta: EntityHashMap>, } diff --git a/crates/bevy_render/src/render_asset.rs b/crates/bevy_render/src/render_asset.rs index 043b3554a1..6626cb7797 100644 --- a/crates/bevy_render/src/render_asset.rs +++ b/crates/bevy_render/src/render_asset.rs @@ -1,18 +1,19 @@ use crate::{ - render_resource::AsBindGroupError, ExtractSchedule, MainWorld, Render, RenderApp, RenderSet, + render_resource::AsBindGroupError, Extract, ExtractSchedule, MainWorld, Render, RenderApp, + RenderSet, Res, }; use bevy_app::{App, Plugin, SubApp}; pub use bevy_asset::RenderAssetUsages; use bevy_asset::{Asset, AssetEvent, AssetId, Assets}; use bevy_ecs::{ - prelude::{Commands, EventReader, IntoSystemConfigs, ResMut, Resource}, - schedule::{SystemConfigs, SystemSet}, - system::{StaticSystemParam, SystemParam, SystemParamItem, SystemState}, + prelude::{Commands, EventReader, IntoScheduleConfigs, ResMut, Resource}, + schedule::{ScheduleConfigs, SystemSet}, + system::{ScheduleSystem, StaticSystemParam, SystemParam, SystemParamItem, SystemState}, world::{FromWorld, Mut}, }; -use bevy_platform_support::collections::{HashMap, HashSet}; -use bevy_render_macros::ExtractResource; +use bevy_platform::collections::{HashMap, HashSet}; use core::marker::PhantomData; +use core::sync::atomic::{AtomicUsize, Ordering}; use thiserror::Error; use tracing::{debug, error}; @@ -131,17 +132,17 @@ impl Plugin // helper to allow specifying dependencies between render assets pub trait RenderAssetDependency { - fn register_system(render_app: &mut SubApp, system: SystemConfigs); + fn register_system(render_app: &mut SubApp, system: ScheduleConfigs); } impl RenderAssetDependency for () { - fn register_system(render_app: &mut SubApp, system: SystemConfigs) { + fn register_system(render_app: &mut SubApp, system: ScheduleConfigs) { render_app.add_systems(Render, system); } } impl RenderAssetDependency for A { - fn register_system(render_app: &mut SubApp, system: SystemConfigs) { + fn register_system(render_app: &mut SubApp, system: ScheduleConfigs) { render_app.add_systems(Render, system.after(prepare_assets::)); } } @@ -150,14 +151,19 
@@ impl RenderAssetDependency for A { #[derive(Resource)] pub struct ExtractedAssets { /// The assets extracted this frame. + /// + /// These are assets that were either added or modified this frame. pub extracted: Vec<(AssetId, A::SourceAsset)>, - /// IDs of the assets removed this frame. + /// IDs of the assets that were removed this frame. /// /// These assets will not be present in [`ExtractedAssets::extracted`]. pub removed: HashSet>, - /// IDs of the assets added this frame. + /// IDs of the assets that were modified this frame. + pub modified: HashSet>, + + /// IDs of the assets that were added this frame. pub added: HashSet>, } @@ -166,6 +172,7 @@ impl Default for ExtractedAssets { Self { extracted: Default::default(), removed: Default::default(), + modified: Default::default(), added: Default::default(), } } @@ -234,8 +241,9 @@ pub(crate) fn extract_render_asset( |world, mut cached_state: Mut>| { let (mut events, mut assets) = cached_state.state.get_mut(world); - let mut changed_assets = >::default(); + let mut needs_extracting = >::default(); let mut removed = >::default(); + let mut modified = >::default(); for event in events.read() { #[expect( @@ -243,12 +251,20 @@ pub(crate) fn extract_render_asset( reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon." )] match event { - AssetEvent::Added { id } | AssetEvent::Modified { id } => { - changed_assets.insert(*id); + AssetEvent::Added { id } => { + needs_extracting.insert(*id); + } + AssetEvent::Modified { id } => { + needs_extracting.insert(*id); + modified.insert(*id); + } + AssetEvent::Removed { .. } => { + // We don't care that the asset was removed from Assets in the main world. + // An asset is only removed from RenderAssets when its last handle is dropped (AssetEvent::Unused). } - AssetEvent::Removed { .. } => {} AssetEvent::Unused { id } => { - changed_assets.remove(id); + needs_extracting.remove(id); + modified.remove(id); removed.insert(*id); } AssetEvent::LoadedWithDependencies { .. } => { @@ -259,7 +275,7 @@ pub(crate) fn extract_render_asset( let mut extracted_assets = Vec::new(); let mut added = >::default(); - for id in changed_assets.drain() { + for id in needs_extracting.drain() { if let Some(asset) = assets.get(id) { let asset_usage = A::asset_usage(asset); if asset_usage.contains(RenderAssetUsages::RENDER_WORLD) { @@ -279,6 +295,7 @@ pub(crate) fn extract_render_asset( commands.insert_resource(ExtractedAssets:: { extracted: extracted_assets, removed, + modified, added, }); cached_state.state.apply(world); @@ -308,7 +325,7 @@ pub fn prepare_assets( mut render_assets: ResMut>, mut prepare_next_frame: ResMut>, param: StaticSystemParam<::Param>, - mut bpf: ResMut, + bpf: Res, ) { let mut wrote_asset_count = 0; @@ -401,54 +418,94 @@ pub fn prepare_assets( } } -/// A resource that attempts to limit the amount of data transferred from cpu to gpu -/// each frame, preventing choppy frames at the cost of waiting longer for gpu assets -/// to become available -#[derive(Resource, Default, Debug, Clone, Copy, ExtractResource)] +pub fn reset_render_asset_bytes_per_frame( + mut bpf_limiter: ResMut, +) { + bpf_limiter.reset(); +} + +pub fn extract_render_asset_bytes_per_frame( + bpf: Extract>, + mut bpf_limiter: ResMut, +) { + bpf_limiter.max_bytes = bpf.max_bytes; +} + +/// A resource that defines the amount of data allowed to be transferred from CPU to GPU +/// each frame, preventing choppy frames at the cost of waiting longer for GPU assets +/// to become available. 
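With the new `modified` set, render-world code can tell apart assets that were just added from assets whose contents changed this frame. A hedged sketch of a system that inspects both sets; `RenderMesh` is used only as a convenient `RenderAsset` implementor, and adding the system to the render app's schedule is omitted here:

```rust
use bevy_ecs::prelude::Res;
use bevy_render::mesh::RenderMesh;
use bevy_render::render_asset::ExtractedAssets;

// Distinguish freshly added assets from modified ones.
fn report_mesh_changes(extracted: Res<ExtractedAssets<RenderMesh>>) {
    for id in &extracted.added {
        println!("mesh asset {id:?} was added this frame");
    }
    for id in &extracted.modified {
        println!("mesh asset {id:?} was modified this frame");
    }
}
```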
+#[derive(Resource, Default)] pub struct RenderAssetBytesPerFrame { pub max_bytes: Option, - pub available: usize, } impl RenderAssetBytesPerFrame { /// `max_bytes`: the number of bytes to write per frame. - /// this is a soft limit: only full assets are written currently, uploading stops + /// + /// This is a soft limit: only full assets are written currently, uploading stops /// after the first asset that exceeds the limit. + /// /// To participate, assets should implement [`RenderAsset::byte_len`]. If the default /// is not overridden, the assets are assumed to be small enough to upload without restriction. pub fn new(max_bytes: usize) -> Self { Self { max_bytes: Some(max_bytes), - available: 0, } } +} - /// Reset the available bytes. Called once per frame by the [`crate::RenderPlugin`]. +/// A render-world resource that facilitates limiting the data transferred from CPU to GPU +/// each frame, preventing choppy frames at the cost of waiting longer for GPU assets +/// to become available. +#[derive(Resource, Default)] +pub struct RenderAssetBytesPerFrameLimiter { + /// Populated by [`RenderAssetBytesPerFrame`] during extraction. + pub max_bytes: Option, + /// Bytes written this frame. + pub bytes_written: AtomicUsize, +} + +impl RenderAssetBytesPerFrameLimiter { + /// Reset the available bytes. Called once per frame during extraction by [`crate::RenderPlugin`]. pub fn reset(&mut self) { - self.available = self.max_bytes.unwrap_or(usize::MAX); - } - - /// check how many bytes are available since the last reset - pub fn available_bytes(&self, required_bytes: usize) -> usize { - if self.max_bytes.is_none() { - return required_bytes; - } - - required_bytes.min(self.available) - } - - /// decrease the available bytes for the current frame - fn write_bytes(&mut self, bytes: usize) { if self.max_bytes.is_none() { return; } - - let write_bytes = bytes.min(self.available); - self.available -= write_bytes; + self.bytes_written.store(0, Ordering::Relaxed); } - // check if any bytes remain available for writing this frame + /// Check how many bytes are available for writing. + pub fn available_bytes(&self, required_bytes: usize) -> usize { + if let Some(max_bytes) = self.max_bytes { + let total_bytes = self + .bytes_written + .fetch_add(required_bytes, Ordering::Relaxed); + + // The bytes available is the inverse of the amount we overshot max_bytes + if total_bytes >= max_bytes { + required_bytes.saturating_sub(total_bytes - max_bytes) + } else { + required_bytes + } + } else { + required_bytes + } + } + + /// Decreases the available bytes for the current frame. + fn write_bytes(&self, bytes: usize) { + if self.max_bytes.is_some() && bytes > 0 { + self.bytes_written.fetch_add(bytes, Ordering::Relaxed); + } + } + + /// Returns `true` if there are no remaining bytes available for writing this frame. 
fn exhausted(&self) -> bool { - self.max_bytes.is_some() && self.available == 0 + if let Some(max_bytes) = self.max_bytes { + let bytes_written = self.bytes_written.load(Ordering::Relaxed); + bytes_written >= max_bytes + } else { + false + } } } diff --git a/crates/bevy_render/src/render_graph/graph.rs b/crates/bevy_render/src/render_graph/graph.rs index 78e6cbf324..d1a7020bcf 100644 --- a/crates/bevy_render/src/render_graph/graph.rs +++ b/crates/bevy_render/src/render_graph/graph.rs @@ -6,7 +6,7 @@ use crate::{ renderer::RenderContext, }; use bevy_ecs::{define_label, intern::Interned, prelude::World, resource::Resource}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::fmt::Debug; use super::{EdgeExistence, InternedRenderLabel, IntoRenderNodeArray}; @@ -680,7 +680,7 @@ mod tests { renderer::RenderContext, }; use bevy_ecs::world::{FromWorld, World}; - use bevy_platform_support::collections::HashSet; + use bevy_platform::collections::HashSet; #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)] enum TestLabel { diff --git a/crates/bevy_render/src/render_phase/draw.rs b/crates/bevy_render/src/render_phase/draw.rs index 4343fda6d3..a12d336018 100644 --- a/crates/bevy_render/src/render_phase/draw.rs +++ b/crates/bevy_render/src/render_phase/draw.rs @@ -332,7 +332,9 @@ where let view = match self.view.get_manual(world, view) { Ok(view) => view, Err(err) => match err { - QueryEntityError::NoSuchEntity(_, _) => return Err(DrawError::ViewEntityNotFound), + QueryEntityError::EntityDoesNotExist(_) => { + return Err(DrawError::ViewEntityNotFound) + } QueryEntityError::QueryDoesNotMatch(_, _) | QueryEntityError::AliasedMutability(_) => { return Err(DrawError::InvalidViewQuery) diff --git a/crates/bevy_render/src/render_phase/mod.rs b/crates/bevy_render/src/render_phase/mod.rs index e9baccf141..a4eb4a944f 100644 --- a/crates/bevy_render/src/render_phase/mod.rs +++ b/crates/bevy_render/src/render_phase/mod.rs @@ -32,21 +32,25 @@ use bevy_app::{App, Plugin}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::component::Tick; use bevy_ecs::entity::EntityHash; -use bevy_platform_support::collections::{hash_map::Entry, HashMap}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_utils::default; pub use draw::*; pub use draw_state::*; use encase::{internal::WriteInto, ShaderSize}; use fixedbitset::{Block, FixedBitSet}; -use indexmap::{IndexMap, IndexSet}; +use indexmap::IndexMap; use nonmax::NonMaxU32; pub use rangefinder::*; use wgpu::Features; -use crate::batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport}; +use crate::batching::gpu_preprocessing::{ + GpuPreprocessingMode, GpuPreprocessingSupport, PhaseBatchedInstanceBuffers, + PhaseIndirectParametersBuffers, +}; use crate::renderer::RenderDevice; use crate::sync_world::{MainEntity, MainEntityHashMap}; use crate::view::RetainedViewEntity; +use crate::RenderDebugFlags; use crate::{ batching::{ self, @@ -63,6 +67,7 @@ use bevy_ecs::{ }; use core::{fmt::Debug, hash::Hash, iter, marker::PhantomData, ops::Range, slice::SliceIndex}; use smallvec::SmallVec; +use tracing::warn; /// Stores the rendering instructions for a single phase that uses bins in all /// views. @@ -92,13 +97,7 @@ pub struct BinnedRenderPhase where BPI: BinnedPhaseItem, { - /// A list of `BatchSetKey`s for batchable, multidrawable items. - /// - /// These are accumulated in `queue_material_meshes` and then sorted in - /// `batching::sort_binned_render_phase`. 
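Configuration stays in the main world: `RenderAssetBytesPerFrame` only carries `max_bytes`, and the render-world `RenderAssetBytesPerFrameLimiter` is refreshed from it during extraction. A minimal sketch of opting into the budget; the 16 MiB figure is an arbitrary example:

```rust
use bevy::prelude::*;
use bevy::render::render_asset::RenderAssetBytesPerFrame;

// Cap per-frame CPU-to-GPU asset uploads at roughly 16 MiB.
fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .insert_resource(RenderAssetBytesPerFrame::new(16 * 1024 * 1024))
        .run();
}
```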
- pub multidrawable_mesh_keys: Vec, - - /// The multidrawable bins themselves. + /// The multidrawable bins. /// /// Each batch set key maps to a *batch set*, which in this case is a set of /// meshes that can be drawn together in one multidraw call. Each batch set @@ -111,36 +110,18 @@ where /// the same pipeline. The first bin, corresponding to the cubes, will have /// two entities in it. The second bin, corresponding to the sphere, will /// have one entity in it. - pub multidrawable_mesh_values: HashMap>, - - /// A list of `BinKey`s for batchable items that aren't multidrawable. - /// - /// These are accumulated in `queue_material_meshes` and then sorted in - /// `batch_and_prepare_binned_render_phase`. - /// - /// Usually, batchable items aren't multidrawable due to platform or - /// hardware limitations. However, it's also possible to have batchable - /// items alongside multidrawable items with custom mesh pipelines. See - /// `specialized_mesh_pipeline` for an example. - pub batchable_mesh_keys: Vec<(BPI::BatchSetKey, BPI::BinKey)>, + pub multidrawable_meshes: IndexMap>, /// The bins corresponding to batchable items that aren't multidrawable. /// - /// For multidrawable entities, use `multidrawable_mesh_values`; for + /// For multidrawable entities, use `multidrawable_meshes`; for /// unbatchable entities, use `unbatchable_values`. - pub batchable_mesh_values: HashMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, - - /// A list of `BinKey`s for unbatchable items. - /// - /// These are accumulated in `queue_material_meshes` and then sorted in - /// `batch_and_prepare_binned_render_phase`. - pub unbatchable_mesh_keys: Vec<(BPI::BatchSetKey, BPI::BinKey)>, + pub batchable_meshes: IndexMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, /// The unbatchable bins. /// /// Each entity here is rendered in a separate drawcall. - pub unbatchable_mesh_values: - HashMap<(BPI::BatchSetKey, BPI::BinKey), UnbatchableBinnedEntities>, + pub unbatchable_meshes: IndexMap<(BPI::BatchSetKey, BPI::BinKey), UnbatchableBinnedEntities>, /// Items in the bin that aren't meshes at all. /// @@ -149,7 +130,7 @@ where /// entity are simply called in order at rendering time. /// /// See the `custom_phase_item` example for an example of how to use this. - pub non_mesh_items: HashMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, + pub non_mesh_items: IndexMap<(BPI::BatchSetKey, BPI::BinKey), NonMeshEntities>, /// Information on each batch set. /// @@ -168,7 +149,7 @@ where /// We retain these so that, when the entity changes, /// [`Self::sweep_old_entities`] can quickly find the bin it was located in /// and remove it. - cached_entity_bin_keys: IndexMap, EntityHash>, + cached_entity_bin_keys: IndexMap, EntityHash>, /// The set of indices in [`Self::cached_entity_bin_keys`] that are /// confirmed to be up to date. @@ -185,14 +166,18 @@ where /// remove the entity from the old bin during /// [`BinnedRenderPhase::sweep_old_entities`]. entities_that_changed_bins: Vec>, + /// The gpu preprocessing mode configured for the view this phase is associated + /// with. + gpu_preprocessing_mode: GpuPreprocessingMode, } /// All entities that share a mesh and a material and can be batched as part of /// a [`BinnedRenderPhase`]. #[derive(Default)] pub struct RenderBin { - /// A list of the entities in each bin. - entities: IndexSet, + /// A list of the entities in each bin, along with their cached + /// [`InputUniformIndex`]. 
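The cubes-and-sphere example above is easier to see with the nested map spelled out. A toy sketch of the `multidrawable_meshes` shape, using strings and integers as stand-ins for the real batch set keys, bin keys, and `RenderBin` contents:

```rust
use indexmap::IndexMap;

// Toy stand-ins for the real key and bin types.
type BatchSetKey = &'static str; // e.g. one entry per pipeline/material combination
type BinKey = &'static str; // e.g. one entry per mesh
type Bin = Vec<u32>; // entity ids stand in for `RenderBin` entries

// The shape of `multidrawable_meshes`: batch set key -> bin key -> entities.
// Everything under one batch set key can go into a single multidraw call.
fn cubes_and_sphere() -> IndexMap<BatchSetKey, IndexMap<BinKey, Bin>> {
    let mut multidrawable_meshes: IndexMap<BatchSetKey, IndexMap<BinKey, Bin>> = IndexMap::new();
    let batch_set = multidrawable_meshes.entry("shared pipeline").or_default();
    // Two cubes share one bin; the sphere gets its own bin.
    batch_set.entry("cube mesh").or_default().extend([1, 2]);
    batch_set.entry("sphere mesh").or_default().push(3);
    multidrawable_meshes
}
```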
+ entities: IndexMap, } /// Information that we track about an entity that was in one bin on the @@ -204,10 +189,23 @@ where /// The entity. main_entity: MainEntity, /// The key that identifies the bin that this entity used to be in. - old_bin_key: CachedBinKey, + old_cached_binned_entity: CachedBinnedEntity, } /// Information that we keep about an entity currently within a bin. +pub struct CachedBinnedEntity +where + BPI: BinnedPhaseItem, +{ + /// Information that we use to identify a cached entity in a bin. + pub cached_bin_key: Option>, + /// The last modified tick of the entity. + /// + /// We use this to detect when the entity needs to be invalidated. + pub change_tick: Tick, +} + +/// Information that we use to identify a cached entity in a bin. pub struct CachedBinKey where BPI: BinnedPhaseItem, @@ -219,10 +217,18 @@ where /// The type of render phase that we use to render the entity: multidraw, /// plain batch, etc. pub phase_type: BinnedRenderPhaseType, - /// The last modified tick of the entity. - /// - /// We use this to detect when the entity needs to be invalidated. - pub change_tick: Tick, +} + +impl Clone for CachedBinnedEntity +where + BPI: BinnedPhaseItem, +{ + fn clone(&self) -> Self { + CachedBinnedEntity { + cached_bin_key: self.cached_bin_key.clone(), + change_tick: self.change_tick, + } + } } impl Clone for CachedBinKey @@ -234,11 +240,21 @@ where batch_set_key: self.batch_set_key.clone(), bin_key: self.bin_key.clone(), phase_type: self.phase_type, - change_tick: self.change_tick, } } } +impl PartialEq for CachedBinKey +where + BPI: BinnedPhaseItem, +{ + fn eq(&self, other: &Self) -> bool { + self.batch_set_key == other.batch_set_key + && self.bin_key == other.bin_key + && self.phase_type == other.phase_type + } +} + /// How we store and render the batch sets. /// /// Each one of these corresponds to a [`GpuPreprocessingMode`]. @@ -310,6 +326,12 @@ pub struct UnbatchableBinnedEntities { pub(crate) buffer_indices: UnbatchableBinnedEntityIndexSet, } +/// Information about [`BinnedRenderPhaseType::NonMesh`] entities. +pub struct NonMeshEntities { + /// The entities. + pub entities: MainEntityHashMap, +} + /// Stores instance indices and dynamic offsets for unbatchable entities in a /// binned render phase. /// @@ -363,14 +385,12 @@ pub enum BinnedRenderPhaseType { /// can be batched with other meshes of the same type. MultidrawableMesh, - /// The item is a mesh that's eligible for single-draw indirect rendering - /// and can be batched with other meshes of the same type. + /// The item is a mesh that can be batched with other meshes of the same type and + /// drawn in a single draw call. BatchableMesh, /// The item is a mesh that's eligible for indirect rendering, but can't be /// batched with other meshes of the same type. - /// - /// At the moment, this is used for skinned meshes. UnbatchableMesh, /// The item isn't a mesh at all. @@ -422,6 +442,19 @@ where } } +/// The index of the uniform describing this object in the GPU buffer, when GPU +/// preprocessing is enabled. +/// +/// For example, for 3D meshes, this is the index of the `MeshInputUniform` in +/// the buffer. +/// +/// This field is ignored if GPU preprocessing isn't in use, such as (currently) +/// in the case of 2D meshes. In that case, it can be safely set to +/// [`core::default::Default::default`]. 
+#[derive(Clone, Copy, PartialEq, Default, Deref, DerefMut)] +#[repr(transparent)] +pub struct InputUniformIndex(pub u32); + impl BinnedRenderPhase where BPI: BinnedPhaseItem, @@ -436,22 +469,34 @@ where batch_set_key: BPI::BatchSetKey, bin_key: BPI::BinKey, (entity, main_entity): (Entity, MainEntity), - phase_type: BinnedRenderPhaseType, + input_uniform_index: InputUniformIndex, + mut phase_type: BinnedRenderPhaseType, change_tick: Tick, ) { + // If the user has overridden indirect drawing for this view, we need to + // force the phase type to be batchable instead. + if self.gpu_preprocessing_mode == GpuPreprocessingMode::PreprocessingOnly + && phase_type == BinnedRenderPhaseType::MultidrawableMesh + { + phase_type = BinnedRenderPhaseType::BatchableMesh; + } + match phase_type { BinnedRenderPhaseType::MultidrawableMesh => { - match self.multidrawable_mesh_values.entry(batch_set_key.clone()) { - Entry::Occupied(mut entry) => { + match self.multidrawable_meshes.entry(batch_set_key.clone()) { + indexmap::map::Entry::Occupied(mut entry) => { entry .get_mut() .entry(bin_key.clone()) .or_default() - .insert(main_entity); + .insert(main_entity, input_uniform_index); } - Entry::Vacant(entry) => { - let mut new_batch_set = HashMap::default(); - new_batch_set.insert(bin_key.clone(), RenderBin::from_entity(main_entity)); + indexmap::map::Entry::Vacant(entry) => { + let mut new_batch_set = IndexMap::default(); + new_batch_set.insert( + bin_key.clone(), + RenderBin::from_entity(main_entity, input_uniform_index), + ); entry.insert(new_batch_set); } } @@ -459,27 +504,27 @@ where BinnedRenderPhaseType::BatchableMesh => { match self - .batchable_mesh_values + .batchable_meshes .entry((batch_set_key.clone(), bin_key.clone()).clone()) { - Entry::Occupied(mut entry) => { - entry.get_mut().insert(main_entity); + indexmap::map::Entry::Occupied(mut entry) => { + entry.get_mut().insert(main_entity, input_uniform_index); } - Entry::Vacant(entry) => { - entry.insert(RenderBin::from_entity(main_entity)); + indexmap::map::Entry::Vacant(entry) => { + entry.insert(RenderBin::from_entity(main_entity, input_uniform_index)); } } } BinnedRenderPhaseType::UnbatchableMesh => { match self - .unbatchable_mesh_values + .unbatchable_meshes .entry((batch_set_key.clone(), bin_key.clone())) { - Entry::Occupied(mut entry) => { + indexmap::map::Entry::Occupied(mut entry) => { entry.get_mut().entities.insert(main_entity, entity); } - Entry::Vacant(entry) => { + indexmap::map::Entry::Vacant(entry) => { let mut entities = MainEntityHashMap::default(); entities.insert(main_entity, entity); entry.insert(UnbatchableBinnedEntities { @@ -496,37 +541,53 @@ where .non_mesh_items .entry((batch_set_key.clone(), bin_key.clone()).clone()) { - Entry::Occupied(mut entry) => { - entry.get_mut().insert(main_entity); + indexmap::map::Entry::Occupied(mut entry) => { + entry.get_mut().entities.insert(main_entity, entity); } - Entry::Vacant(entry) => { - entry.insert(RenderBin::from_entity(main_entity)); + indexmap::map::Entry::Vacant(entry) => { + let mut entities = MainEntityHashMap::default(); + entities.insert(main_entity, entity); + entry.insert(NonMeshEntities { entities }); } } } } - let new_bin_key = CachedBinKey { - batch_set_key, - bin_key, - phase_type, + // Update the cache. + self.update_cache( + main_entity, + Some(CachedBinKey { + batch_set_key, + bin_key, + phase_type, + }), + change_tick, + ); + } + + /// Inserts an entity into the cache with the given change tick. 
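Callers that queue binned items now pass the cached input-uniform index and the desired phase type together. A hedged sketch of what such a call looks like, assuming the queue method keeps the name `add`; the generic helper and its parameters are invented for this sketch:

```rust
use bevy_ecs::component::Tick;
use bevy_ecs::entity::Entity;
use bevy_render::render_phase::{
    BinnedPhaseItem, BinnedRenderPhase, BinnedRenderPhaseType, InputUniformIndex,
};
use bevy_render::sync_world::MainEntity;

// The batch set key and bin key come from whatever `BinnedPhaseItem`
// implementation is in use.
fn queue_one<BPI: BinnedPhaseItem>(
    phase: &mut BinnedRenderPhase<BPI>,
    batch_set_key: BPI::BatchSetKey,
    bin_key: BPI::BinKey,
    entity: (Entity, MainEntity),
    input_uniform_index: u32,
    change_tick: Tick,
) {
    phase.add(
        batch_set_key,
        bin_key,
        entity,
        // New: the cached index of this mesh's input uniform in the GPU buffer.
        InputUniformIndex(input_uniform_index),
        // The phase may still downgrade this to `BatchableMesh` if indirect
        // drawing is disabled for the view.
        BinnedRenderPhaseType::MultidrawableMesh,
        change_tick,
    );
}
```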
+ pub fn update_cache( + &mut self, + main_entity: MainEntity, + cached_bin_key: Option>, + change_tick: Tick, + ) { + let new_cached_binned_entity = CachedBinnedEntity { + cached_bin_key, change_tick, }; - let (index, old_bin_key) = self + let (index, old_cached_binned_entity) = self .cached_entity_bin_keys - .insert_full(main_entity, new_bin_key.clone()); + .insert_full(main_entity, new_cached_binned_entity.clone()); // If the entity changed bins, record its old bin so that we can remove // the entity from it. - if let Some(old_bin_key) = old_bin_key { - if old_bin_key.batch_set_key != new_bin_key.batch_set_key - || old_bin_key.bin_key != new_bin_key.bin_key - || old_bin_key.phase_type != new_bin_key.phase_type - { + if let Some(old_cached_binned_entity) = old_cached_binned_entity { + if old_cached_binned_entity.cached_bin_key != new_cached_binned_entity.cached_bin_key { self.entities_that_changed_bins.push(EntityThatChangedBins { main_entity, - old_bin_key, + old_cached_binned_entity, }); } } @@ -574,10 +635,10 @@ where match self.batch_sets { BinnedRenderPhaseBatchSets::DynamicUniforms(ref batch_sets) => { - debug_assert_eq!(self.batchable_mesh_keys.len(), batch_sets.len()); + debug_assert_eq!(self.batchable_meshes.len(), batch_sets.len()); for ((batch_set_key, bin_key), batch_set) in - self.batchable_mesh_keys.iter().zip(batch_sets.iter()) + self.batchable_meshes.keys().zip(batch_sets.iter()) { for batch in batch_set { let binned_phase_item = BPI::new( @@ -602,7 +663,7 @@ where BinnedRenderPhaseBatchSets::Direct(ref batch_set) => { for (batch, (batch_set_key, bin_key)) in - batch_set.iter().zip(self.batchable_mesh_keys.iter()) + batch_set.iter().zip(self.batchable_meshes.keys()) { let binned_phase_item = BPI::new( batch_set_key.clone(), @@ -625,11 +686,11 @@ where BinnedRenderPhaseBatchSets::MultidrawIndirect(ref batch_sets) => { for (batch_set_key, batch_set) in self - .multidrawable_mesh_keys - .iter() + .multidrawable_meshes + .keys() .chain( - self.batchable_mesh_keys - .iter() + self.batchable_meshes + .keys() .map(|(batch_set_key, _)| batch_set_key), ) .zip(batch_sets.iter()) @@ -686,9 +747,9 @@ where let draw_functions = world.resource::>(); let mut draw_functions = draw_functions.write(); - for (batch_set_key, bin_key) in &self.unbatchable_mesh_keys { + for (batch_set_key, bin_key) in self.unbatchable_meshes.keys() { let unbatchable_entities = - &self.unbatchable_mesh_values[&(batch_set_key.clone(), bin_key.clone())]; + &self.unbatchable_meshes[&(batch_set_key.clone(), bin_key.clone())]; for (entity_index, entity) in unbatchable_entities.entities.iter().enumerate() { let unbatchable_dynamic_offset = match &unbatchable_entities.buffer_indices { UnbatchableBinnedEntityIndexSet::NoEntities => { @@ -714,7 +775,7 @@ where } }, }, - UnbatchableBinnedEntityIndexSet::Dense(ref dynamic_offsets) => { + UnbatchableBinnedEntityIndexSet::Dense(dynamic_offsets) => { dynamic_offsets[entity_index].clone() } }; @@ -752,14 +813,14 @@ where let draw_functions = world.resource::>(); let mut draw_functions = draw_functions.write(); - for ((batch_set_key, bin_key), bin) in &self.non_mesh_items { - for &entity in &bin.entities { + for ((batch_set_key, bin_key), non_mesh_entities) in &self.non_mesh_items { + for (main_entity, entity) in non_mesh_entities.entities.iter() { // Come up with a fake batch range and extra index. The draw // function is expected to manage any sort of batching logic itself. 
let binned_phase_item = BPI::new( batch_set_key.clone(), bin_key.clone(), - (Entity::PLACEHOLDER, entity), + (*entity, *main_entity), 0..1, PhaseItemExtraIndex::None, ); @@ -777,16 +838,13 @@ where } pub fn is_empty(&self) -> bool { - self.multidrawable_mesh_values.is_empty() - && self.batchable_mesh_values.is_empty() - && self.unbatchable_mesh_values.is_empty() + self.multidrawable_meshes.is_empty() + && self.batchable_meshes.is_empty() + && self.unbatchable_meshes.is_empty() && self.non_mesh_items.is_empty() } pub fn prepare_for_new_frame(&mut self) { - self.multidrawable_mesh_keys.clear(); - self.batchable_mesh_keys.clear(); - self.unbatchable_mesh_keys.clear(); self.batch_sets.clear(); self.valid_cached_entity_bin_keys.clear(); @@ -796,6 +854,10 @@ where .set_range(self.cached_entity_bin_keys.len().., true); self.entities_that_changed_bins.clear(); + + for unbatchable_bin in self.unbatchable_meshes.values_mut() { + unbatchable_bin.buffer_indices.clear(); + } } /// Checks to see whether the entity is in a bin and returns true if it's @@ -823,40 +885,46 @@ where /// Removes all entities not marked as clean from the bins. /// /// During `queue_material_meshes`, we process all visible entities and mark - /// each as clean as we come to it. Then we call this method, which removes - /// entities that aren't marked as clean from the bins. + /// each as clean as we come to it. Then, in [`sweep_old_entities`], we call + /// this method, which removes entities that aren't marked as clean from the + /// bins. pub fn sweep_old_entities(&mut self) { // Search for entities not marked as valid. We have to do this in // reverse order because `swap_remove_index` will potentially invalidate // all indices after the one we remove. for index in ReverseFixedBitSetZeroesIterator::new(&self.valid_cached_entity_bin_keys) { - // If we found an invalid entity, remove it. Note that this - // potentially invalidates later indices, but that's OK because - // we're going in reverse order. - let Some((entity, entity_bin_key)) = + let Some((entity, cached_binned_entity)) = self.cached_entity_bin_keys.swap_remove_index(index) else { continue; }; - remove_entity_from_bin( - entity, - &entity_bin_key, - &mut self.multidrawable_mesh_values, - &mut self.batchable_mesh_values, - &mut self.unbatchable_mesh_values, - &mut self.non_mesh_items, - ); + if let Some(ref cached_bin_key) = cached_binned_entity.cached_bin_key { + remove_entity_from_bin( + entity, + cached_bin_key, + &mut self.multidrawable_meshes, + &mut self.batchable_meshes, + &mut self.unbatchable_meshes, + &mut self.non_mesh_items, + ); + } } // If an entity changed bins, we need to remove it from its old bin. for entity_that_changed_bins in self.entities_that_changed_bins.drain(..) 
{ + let Some(ref old_cached_bin_key) = entity_that_changed_bins + .old_cached_binned_entity + .cached_bin_key + else { + continue; + }; remove_entity_from_bin( entity_that_changed_bins.main_entity, - &entity_that_changed_bins.old_bin_key, - &mut self.multidrawable_mesh_values, - &mut self.batchable_mesh_values, - &mut self.unbatchable_mesh_values, + old_cached_bin_key, + &mut self.multidrawable_meshes, + &mut self.batchable_meshes, + &mut self.unbatchable_meshes, &mut self.non_mesh_items, ); } @@ -872,22 +940,19 @@ where fn remove_entity_from_bin( entity: MainEntity, entity_bin_key: &CachedBinKey, - multidrawable_mesh_values: &mut HashMap>, - batchable_mesh_values: &mut HashMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, - unbatchable_mesh_values: &mut HashMap< - (BPI::BatchSetKey, BPI::BinKey), - UnbatchableBinnedEntities, - >, - non_mesh_items: &mut HashMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, + multidrawable_meshes: &mut IndexMap>, + batchable_meshes: &mut IndexMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, + unbatchable_meshes: &mut IndexMap<(BPI::BatchSetKey, BPI::BinKey), UnbatchableBinnedEntities>, + non_mesh_items: &mut IndexMap<(BPI::BatchSetKey, BPI::BinKey), NonMeshEntities>, ) where BPI: BinnedPhaseItem, { match entity_bin_key.phase_type { BinnedRenderPhaseType::MultidrawableMesh => { - if let Entry::Occupied(mut batch_set_entry) = - multidrawable_mesh_values.entry(entity_bin_key.batch_set_key.clone()) + if let indexmap::map::Entry::Occupied(mut batch_set_entry) = + multidrawable_meshes.entry(entity_bin_key.batch_set_key.clone()) { - if let Entry::Occupied(mut bin_entry) = batch_set_entry + if let indexmap::map::Entry::Occupied(mut bin_entry) = batch_set_entry .get_mut() .entry(entity_bin_key.bin_key.clone()) { @@ -895,19 +960,21 @@ fn remove_entity_from_bin( // If the bin is now empty, remove the bin. if bin_entry.get_mut().is_empty() { - bin_entry.remove(); + bin_entry.swap_remove(); } } - // If the batch set is now empty, remove it. + // If the batch set is now empty, remove it. This will perturb + // the order, but that's OK because we're going to sort the bin + // afterwards. if batch_set_entry.get_mut().is_empty() { - batch_set_entry.remove(); + batch_set_entry.swap_remove(); } } } BinnedRenderPhaseType::BatchableMesh => { - if let Entry::Occupied(mut bin_entry) = batchable_mesh_values.entry(( + if let indexmap::map::Entry::Occupied(mut bin_entry) = batchable_meshes.entry(( entity_bin_key.batch_set_key.clone(), entity_bin_key.bin_key.clone(), )) { @@ -915,13 +982,13 @@ fn remove_entity_from_bin( // If the bin is now empty, remove the bin. if bin_entry.get_mut().is_empty() { - bin_entry.remove(); + bin_entry.swap_remove(); } } } BinnedRenderPhaseType::UnbatchableMesh => { - if let Entry::Occupied(mut bin_entry) = unbatchable_mesh_values.entry(( + if let indexmap::map::Entry::Occupied(mut bin_entry) = unbatchable_meshes.entry(( entity_bin_key.batch_set_key.clone(), entity_bin_key.bin_key.clone(), )) { @@ -929,21 +996,21 @@ fn remove_entity_from_bin( // If the bin is now empty, remove the bin. 
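The sweep removes stale cache entries by walking invalid indices in reverse and calling `swap_remove_index`, so removals never disturb entries that are still to be visited. A simplified sketch of that idea; the real sweep iterates zero bits with `ReverseFixedBitSetZeroesIterator` and also removes each entity from its bin, and the map values here are placeholders:

```rust
use fixedbitset::FixedBitSet;
use indexmap::IndexMap;

// Every entry whose index bit is *not* set in `valid` gets removed.
// Walking indices in reverse keeps removals from disturbing entries we
// haven't visited yet, since `swap_remove_index` only moves the last element.
fn sweep(cache: &mut IndexMap<u64, &'static str>, valid: &FixedBitSet) {
    for index in (0..cache.len()).rev() {
        if !valid.contains(index) {
            cache.swap_remove_index(index);
        }
    }
}
```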
if bin_entry.get_mut().entities.is_empty() { - bin_entry.remove(); + bin_entry.swap_remove(); } } } BinnedRenderPhaseType::NonMesh => { - if let Entry::Occupied(mut bin_entry) = non_mesh_items.entry(( + if let indexmap::map::Entry::Occupied(mut bin_entry) = non_mesh_items.entry(( entity_bin_key.batch_set_key.clone(), entity_bin_key.bin_key.clone(), )) { - bin_entry.get_mut().remove(entity); + bin_entry.get_mut().entities.remove(&entity); // If the bin is now empty, remove the bin. - if bin_entry.get_mut().is_empty() { - bin_entry.remove(); + if bin_entry.get_mut().entities.is_empty() { + bin_entry.swap_remove(); } } } @@ -956,13 +1023,10 @@ where { fn new(gpu_preprocessing: GpuPreprocessingMode) -> Self { Self { - multidrawable_mesh_keys: vec![], - multidrawable_mesh_values: HashMap::default(), - batchable_mesh_keys: vec![], - batchable_mesh_values: HashMap::default(), - unbatchable_mesh_keys: vec![], - unbatchable_mesh_values: HashMap::default(), - non_mesh_items: HashMap::default(), + multidrawable_meshes: IndexMap::default(), + batchable_meshes: IndexMap::default(), + unbatchable_meshes: IndexMap::default(), + non_mesh_items: IndexMap::default(), batch_sets: match gpu_preprocessing { GpuPreprocessingMode::Culling => { BinnedRenderPhaseBatchSets::MultidrawIndirect(vec![]) @@ -975,6 +1039,7 @@ where cached_entity_bin_keys: IndexMap::default(), valid_cached_entity_bin_keys: FixedBitSet::new(), entities_that_changed_bins: vec![], + gpu_preprocessing_mode: gpu_preprocessing, } } } @@ -1014,7 +1079,7 @@ impl UnbatchableBinnedEntityIndexSet { }, }) } - UnbatchableBinnedEntityIndexSet::Dense(ref indices) => { + UnbatchableBinnedEntityIndexSet::Dense(indices) => { indices.get(entity_index as usize).cloned() } } @@ -1026,18 +1091,26 @@ impl UnbatchableBinnedEntityIndexSet { /// /// This is the version used when the pipeline supports GPU preprocessing: e.g. /// 3D PBR meshes. -pub struct BinnedRenderPhasePlugin(PhantomData<(BPI, GFBD)>) -where - BPI: BinnedPhaseItem, - GFBD: GetFullBatchData; - -impl Default for BinnedRenderPhasePlugin +pub struct BinnedRenderPhasePlugin where BPI: BinnedPhaseItem, GFBD: GetFullBatchData, { - fn default() -> Self { - Self(PhantomData) + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, + phantom: PhantomData<(BPI, GFBD)>, +} + +impl BinnedRenderPhasePlugin +where + BPI: BinnedPhaseItem, + GFBD: GetFullBatchData, +{ + pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { + debug_flags, + phantom: PhantomData, + } } } @@ -1053,6 +1126,11 @@ where render_app .init_resource::>() + .init_resource::>() + .insert_resource(PhaseIndirectParametersBuffers::::new( + self.debug_flags + .contains(RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS), + )) .add_systems( Render, ( @@ -1068,6 +1146,14 @@ where ), ) .in_set(RenderSet::PrepareResources), + sweep_old_entities::.in_set(RenderSet::QueueSweep), + gpu_preprocessing::collect_buffers_for_phase:: + .run_if( + resource_exists::< + BatchedInstanceBuffers, + >, + ) + .in_set(RenderSet::PrepareResourcesCollectPhaseBuffers), ), ); } @@ -1111,18 +1197,26 @@ where /// /// This is the version used when the pipeline supports GPU preprocessing: e.g. /// 3D PBR meshes. 
-pub struct SortedRenderPhasePlugin(PhantomData<(SPI, GFBD)>) -where - SPI: SortedPhaseItem, - GFBD: GetFullBatchData; - -impl Default for SortedRenderPhasePlugin +pub struct SortedRenderPhasePlugin where SPI: SortedPhaseItem, GFBD: GetFullBatchData, { - fn default() -> Self { - Self(PhantomData) + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, + phantom: PhantomData<(SPI, GFBD)>, +} + +impl SortedRenderPhasePlugin +where + SPI: SortedPhaseItem, + GFBD: GetFullBatchData, +{ + pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { + debug_flags, + phantom: PhantomData, + } } } @@ -1138,18 +1232,33 @@ where render_app .init_resource::>() + .init_resource::>() + .insert_resource(PhaseIndirectParametersBuffers::::new( + self.debug_flags + .contains(RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS), + )) .add_systems( Render, ( - no_gpu_preprocessing::batch_and_prepare_sorted_render_phase:: - .run_if(resource_exists::>), - gpu_preprocessing::batch_and_prepare_sorted_render_phase::.run_if( - resource_exists::< - BatchedInstanceBuffers, - >, - ), - ) - .in_set(RenderSet::PrepareResources), + ( + no_gpu_preprocessing::batch_and_prepare_sorted_render_phase:: + .run_if(resource_exists::>), + gpu_preprocessing::batch_and_prepare_sorted_render_phase:: + .run_if( + resource_exists::< + BatchedInstanceBuffers, + >, + ), + ) + .in_set(RenderSet::PrepareResources), + gpu_preprocessing::collect_buffers_for_phase:: + .run_if( + resource_exists::< + BatchedInstanceBuffers, + >, + ) + .in_set(RenderSet::PrepareResourcesCollectPhaseBuffers), + ), ); } } @@ -1190,7 +1299,7 @@ impl UnbatchableBinnedEntityIndexSet { } UnbatchableBinnedEntityIndexSet::Sparse { - ref mut instance_range, + instance_range, first_indirect_parameters_index, } if instance_range.end == indices.instance_index && ((first_indirect_parameters_index.is_none() @@ -1221,6 +1330,10 @@ impl UnbatchableBinnedEntityIndexSet { // but let's go ahead and do the sensible thing anyhow: demote // the compressed `NoDynamicOffsets` field to the full // `DynamicOffsets` array. + warn!( + "Unbatchable binned entity index set was demoted from sparse to dense. \ + This is a bug in the renderer. Please report it.", + ); let new_dynamic_offsets = (0..instance_range.len() as u32) .flat_map(|entity_index| self.indices_for_entity_index(entity_index)) .chain(iter::once(indices)) @@ -1228,11 +1341,22 @@ impl UnbatchableBinnedEntityIndexSet { *self = UnbatchableBinnedEntityIndexSet::Dense(new_dynamic_offsets); } - UnbatchableBinnedEntityIndexSet::Dense(ref mut dense_indices) => { + UnbatchableBinnedEntityIndexSet::Dense(dense_indices) => { dense_indices.push(indices); } } } + + /// Clears the unbatchable binned entity index set. + fn clear(&mut self) { + match self { + UnbatchableBinnedEntityIndexSet::Dense(dense_indices) => dense_indices.clear(), + UnbatchableBinnedEntityIndexSet::Sparse { .. } => { + *self = UnbatchableBinnedEntityIndexSet::NoEntities; + } + _ => {} + } + } } /// A collection of all items to be rendered that will be encoded to GPU @@ -1350,15 +1474,15 @@ where /// [`SortedPhaseItem`]s. /// /// * Binned phase items have a `BinKey` which specifies what bin they're to be -/// placed in. All items in the same bin are eligible to be batched together. -/// The `BinKey`s are sorted, but the individual bin items aren't. Binned phase -/// items are good for opaque meshes, in which the order of rendering isn't -/// important. 
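Both phase plugins are now constructed explicitly with the renderer's debug flags instead of through `Default`. A sketch of registering them for the stock 3D phases; `Opaque3d`, `Transparent3d`, and `MeshPipeline` come from `bevy_core_pipeline` and `bevy_pbr` and are used here purely as plausible type parameters:

```rust
use bevy::prelude::*;
use bevy::core_pipeline::core_3d::{Opaque3d, Transparent3d};
use bevy::pbr::MeshPipeline;
use bevy::render::render_phase::{BinnedRenderPhasePlugin, SortedRenderPhasePlugin};
use bevy::render::RenderDebugFlags;

// Register a binned and a sorted phase, forwarding the debug flags the
// renderer was built with.
fn add_phase_plugins(app: &mut App, debug_flags: RenderDebugFlags) {
    app.add_plugins((
        BinnedRenderPhasePlugin::<Opaque3d, MeshPipeline>::new(debug_flags),
        SortedRenderPhasePlugin::<Transparent3d, MeshPipeline>::new(debug_flags),
    ));
}
```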
Generally, binned phase items are faster than sorted phase items. +/// placed in. All items in the same bin are eligible to be batched together. +/// The `BinKey`s are sorted, but the individual bin items aren't. Binned phase +/// items are good for opaque meshes, in which the order of rendering isn't +/// important. Generally, binned phase items are faster than sorted phase items. /// /// * Sorted phase items, on the other hand, are placed into one large buffer -/// and then sorted all at once. This is needed for transparent meshes, which -/// have to be sorted back-to-front to render with the painter's algorithm. -/// These types of phase items are generally slower than binned phase items. +/// and then sorted all at once. This is needed for transparent meshes, which +/// have to be sorted back-to-front to render with the painter's algorithm. +/// These types of phase items are generally slower than binned phase items. pub trait PhaseItem: Sized + Send + Sync + 'static { /// Whether or not this `PhaseItem` should be subjected to automatic batching. (Default: `true`) const AUTOMATIC_BATCHING: bool = true; @@ -1399,12 +1523,12 @@ pub trait PhaseItem: Sized + Send + Sync + 'static { /// instances they already have. These can be: /// /// * The *dynamic offset*: a `wgpu` dynamic offset into the uniform buffer of -/// instance data. This is used on platforms that don't support storage -/// buffers, to work around uniform buffer size limitations. +/// instance data. This is used on platforms that don't support storage +/// buffers, to work around uniform buffer size limitations. /// /// * The *indirect parameters index*: an index into the buffer that specifies -/// the indirect parameters for this [`PhaseItem`]'s drawcall. This is used when -/// indirect mode is on (as used for GPU culling). +/// the indirect parameters for this [`PhaseItem`]'s drawcall. This is used when +/// indirect mode is on (as used for GPU culling). /// /// Note that our indirect draw functionality requires storage buffers, so it's /// impossible to have both a dynamic offset and an indirect parameters index. @@ -1605,6 +1729,18 @@ where } } +/// Removes entities that became invisible or changed phases from the bins. +/// +/// This must run after queuing. +pub fn sweep_old_entities(mut render_phases: ResMut>) +where + BPI: BinnedPhaseItem, +{ + for phase in render_phases.0.values_mut() { + phase.sweep_old_entities(); + } +} + impl BinnedRenderPhaseType { pub fn mesh( batchable: bool, @@ -1620,15 +1756,15 @@ impl BinnedRenderPhaseType { impl RenderBin { /// Creates a [`RenderBin`] containing a single entity. - fn from_entity(entity: MainEntity) -> RenderBin { - let mut entities = IndexSet::default(); - entities.insert(entity); + fn from_entity(entity: MainEntity, uniform_index: InputUniformIndex) -> RenderBin { + let mut entities = IndexMap::default(); + entities.insert(entity, uniform_index); RenderBin { entities } } /// Inserts an entity into the bin. - fn insert(&mut self, entity: MainEntity) { - self.entities.insert(entity); + fn insert(&mut self, entity: MainEntity, uniform_index: InputUniformIndex) { + self.entities.insert(entity, uniform_index); } /// Removes an entity from the bin. @@ -1641,9 +1777,10 @@ impl RenderBin { self.entities.is_empty() } - /// Returns the [`IndexSet`] containing all the entities in the bin. + /// Returns the [`IndexMap`] containing all the entities in the bin, along + /// with the cached [`InputUniformIndex`] of each. 
#[inline] - pub fn entities(&self) -> &IndexSet { + pub fn entities(&self) -> &IndexMap { &self.entities } } diff --git a/crates/bevy_render/src/render_resource/bind_group.rs b/crates/bevy_render/src/render_resource/bind_group.rs index 9b29e453a7..2c8e984bfd 100644 --- a/crates/bevy_render/src/render_resource/bind_group.rs +++ b/crates/bevy_render/src/render_resource/bind_group.rs @@ -12,7 +12,11 @@ pub use bevy_render_macros::AsBindGroup; use core::ops::Deref; use encase::ShaderType; use thiserror::Error; -use wgpu::{BindGroupEntry, BindGroupLayoutEntry, BindingResource, TextureViewDimension}; +use wgpu::{ + BindGroupEntry, BindGroupLayoutEntry, BindingResource, SamplerBindingType, TextureViewDimension, +}; + +use super::{BindlessDescriptor, BindlessSlabResourceLimit}; define_atomic_id!(BindGroupId); @@ -144,16 +148,16 @@ impl Deref for BindGroup { /// ## `uniform(BINDING_INDEX)` /// /// * The field will be converted to a shader-compatible type using the [`ShaderType`] trait, written to a [`Buffer`], and bound as a uniform. -/// [`ShaderType`] is implemented for most math types already, such as [`f32`], [`Vec4`](bevy_math::Vec4), and -/// [`LinearRgba`](bevy_color::LinearRgba). It can also be derived for custom structs. +/// [`ShaderType`] is implemented for most math types already, such as [`f32`], [`Vec4`](bevy_math::Vec4), and +/// [`LinearRgba`](bevy_color::LinearRgba). It can also be derived for custom structs. /// /// ## `texture(BINDING_INDEX, arguments)` /// /// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) -/// GPU resource, which will be bound as a texture in shaders. The field will be assumed to implement [`Into>>`]. In practice, -/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is -/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `sampler` binding attribute -/// (with a different binding index) if a binding of the sampler for the [`Image`](bevy_image::Image) is also required. +/// GPU resource, which will be bound as a texture in shaders. The field will be assumed to implement [`Into>>`]. In practice, +/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is +/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `sampler` binding attribute +/// (with a different binding index) if a binding of the sampler for the [`Image`](bevy_image::Image) is also required. /// /// | Arguments | Values | Default | /// |-----------------------|-------------------------------------------------------------------------|----------------------| @@ -166,9 +170,9 @@ impl Deref for BindGroup { /// ## `storage_texture(BINDING_INDEX, arguments)` /// /// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) -/// GPU resource, which will be bound as a storage texture in shaders. The field will be assumed to implement [`Into>>`]. In practice, -/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is -/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. +/// GPU resource, which will be bound as a storage texture in shaders. The field will be assumed to implement [`Into>>`]. 
In practice, +/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is +/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. /// /// | Arguments | Values | Default | /// |------------------------|--------------------------------------------------------------------------------------------|---------------| @@ -180,10 +184,10 @@ impl Deref for BindGroup { /// ## `sampler(BINDING_INDEX, arguments)` /// /// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Sampler`] GPU -/// resource, which will be bound as a sampler in shaders. The field will be assumed to implement [`Into>>`]. In practice, -/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is -/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `texture` binding attribute -/// (with a different binding index) if a binding of the texture for the [`Image`](bevy_image::Image) is also required. +/// resource, which will be bound as a sampler in shaders. The field will be assumed to implement [`Into>>`]. In practice, +/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is +/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `texture` binding attribute +/// (with a different binding index) if a binding of the texture for the [`Image`](bevy_image::Image) is also required. /// /// | Arguments | Values | Default | /// |------------------------|-------------------------------------------------------------------------|------------------------| /// /// ## `storage(BINDING_INDEX, arguments)` /// -/// * The field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Buffer`] GPU resource, which -/// will be bound as a storage buffer in shaders. If the `storage` attribute is used, the field is expected a raw -/// buffer, and the buffer will be bound as a storage buffer in shaders. +/// * The field's [`Handle`](bevy_asset::Handle) will be used to look +/// up the matching [`Buffer`] GPU resource, which will be bound as a storage +/// buffer in shaders. If the `storage` attribute is used, the field is expected +/// to be a raw buffer, and the buffer will be bound as a storage buffer in shaders. +/// In bindless mode, the `binding_array()` argument that specifies the binding +/// number of the resulting storage buffer binding array must be present.
/// -/// | Arguments | Values | Default | -/// |------------------------|-------------------------------------------------------------------------|----------------------| -/// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | -/// | `read_only` | if present then value is true, otherwise false | `false` | -/// | `buffer` | if present then the field will be assumed to be a raw wgpu buffer | | +/// | Arguments | Values | Default | +/// |------------------------|-------------------------------------------------------------------------|------------------------| +/// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | +/// | `read_only` | if present then value is true, otherwise false | `false` | +/// | `buffer` | if present then the field will be assumed to be a raw wgpu buffer | | +/// | `binding_array(...)` | the binding number of the binding array, for bindless mode | bindless mode disabled | /// /// Note that fields without field-level binding attributes will be ignored. /// ``` @@ -259,44 +267,179 @@ impl Deref for BindGroup { /// Some less common scenarios will require "struct-level" attributes. These are the currently supported struct-level attributes: /// ## `uniform(BINDING_INDEX, ConvertedShaderType)` /// -/// * This also creates a [`Buffer`] using [`ShaderType`] and binds it as a uniform, much -/// like the field-level `uniform` attribute. The difference is that the entire [`AsBindGroup`] value is converted to `ConvertedShaderType`, -/// which must implement [`ShaderType`], instead of a specific field implementing [`ShaderType`]. This is useful if more complicated conversion -/// logic is required. The conversion is done using the [`AsBindGroupShaderType`] trait, which is automatically implemented -/// if `&Self` implements [`Into`]. Only use [`AsBindGroupShaderType`] if access to resources like [`RenderAssets`] is -/// required. +/// * This also creates a [`Buffer`] using [`ShaderType`] and binds it as a +/// uniform, much like the field-level `uniform` attribute. The difference is +/// that the entire [`AsBindGroup`] value is converted to `ConvertedShaderType`, +/// which must implement [`ShaderType`], instead of a specific field +/// implementing [`ShaderType`]. This is useful if more complicated conversion +/// logic is required, or when using bindless mode (see below). The conversion +/// is done using the [`AsBindGroupShaderType`] trait, +/// which is automatically implemented if `&Self` implements +/// [`Into`]. Outside of bindless mode, only use +/// [`AsBindGroupShaderType`] if access to resources like +/// [`RenderAssets`] is required. +/// +/// * In bindless mode (see `bindless(COUNT)`), this attribute becomes +/// `uniform(BINDLESS_INDEX, ConvertedShaderType, +/// binding_array(BINDING_INDEX))`. The resulting uniform buffers will be +/// available in the shader as a binding array at the given `BINDING_INDEX`. The +/// `BINDLESS_INDEX` specifies the offset of the buffer in the bindless index +/// table. +/// +/// For example, suppose that the material slot is stored in a variable named +/// `slot`, the bindless index table is named `material_indices`, and that the +/// first field (index 0) of the bindless index table type is named +/// `material`. 
Then specifying `#[uniform(0, StandardMaterialUniform, +/// binding_array(10))]` will create a binding array buffer declared in the +/// shader as `var<storage> material_array: +/// binding_array<StandardMaterialUniform>` and accessible as +/// `material_array[material_indices[slot].material]`. +/// +/// ## `data(BINDING_INDEX, ConvertedShaderType, binding_array(BINDING_INDEX))` +/// +/// * This is very similar to `uniform(BINDING_INDEX, ConvertedShaderType, +/// binding_array(BINDING_INDEX))` and in fact is identical if bindless mode +/// isn't being used. The difference is that, in bindless mode, the `data` +/// attribute produces a single buffer containing an array, not an array of +/// buffers. For example, suppose you had the following declaration: +/// +/// ```ignore +/// #[uniform(0, StandardMaterialUniform, binding_array(10))] +/// struct StandardMaterial { ... } +/// ``` +/// +/// In bindless mode, this will produce a binding matching the following WGSL +/// declaration: +/// +/// ```wgsl +/// @group(2) @binding(10) var<storage> material_array: binding_array<StandardMaterialUniform>; +/// ``` +/// +/// On the other hand, if you write this declaration: +/// +/// ```ignore +/// #[data(0, StandardMaterialUniform, binding_array(10))] +/// struct StandardMaterial { ... } +/// ``` +/// +/// Then Bevy produces a binding that matches this WGSL declaration instead: +/// +/// ```wgsl +/// @group(2) @binding(10) var<storage> material_array: array<StandardMaterialUniform>; +/// ``` +/// +/// * Just as with the structure-level `uniform` attribute, Bevy converts the +/// entire [`AsBindGroup`] to `ConvertedShaderType`, using the +/// [`AsBindGroupShaderType`] trait. +/// +/// * In non-bindless mode, the structure-level `data` attribute is the same as +/// the structure-level `uniform` attribute and produces a single uniform buffer +/// in the shader. The above example would result in a binding that looks like +/// this in WGSL in non-bindless mode: +/// +/// ```wgsl +/// @group(2) @binding(0) var<uniform> material: StandardMaterial; +/// ``` +/// +/// * For efficiency reasons, `data` is generally preferred over `uniform` +/// unless you need to place your data in individual buffers. /// /// ## `bind_group_data(DataType)` /// /// * The [`AsBindGroup`] type will be converted to some `DataType` using [`Into`] and stored -/// as [`AsBindGroup::Data`] as part of the [`AsBindGroup::as_bind_group`] call. This is useful if data needs to be stored alongside -/// the generated bind group, such as a unique identifier for a material's bind group. The most common use case for this attribute -/// is "shader pipeline specialization". See [`SpecializedRenderPipeline`](crate::render_resource::SpecializedRenderPipeline). +/// as [`AsBindGroup::Data`] as part of the [`AsBindGroup::as_bind_group`] call. This is useful if data needs to be stored alongside +/// the generated bind group, such as a unique identifier for a material's bind group. The most common use case for this attribute -/// is "shader pipeline specialization". See [`SpecializedRenderPipeline`](crate::render_resource::SpecializedRenderPipeline). /// -/// ## `bindless(COUNT)` +/// ## `bindless` /// /// * This switch enables *bindless resources*, which changes the way Bevy -/// supplies resources (uniforms, textures, and samplers) to the shader. -/// When bindless resources are enabled, and the current platform supports -/// them, instead of presenting a single instance of a resource to your -/// shader Bevy will instead present a *binding array* of `COUNT` elements.
-/// In your shader, the index of the element of each binding array -/// corresponding to the mesh currently being drawn can be retrieved with -/// `mesh[in.instance_index].material_and_lightmap_bind_group_slot & -/// 0xffffu`. -/// * Bindless uniforms don't exist, so in bindless mode all uniforms and -/// uniform buffers are automatically replaced with read-only storage -/// buffers. -/// * The purpose of bindless mode is to improve performance by reducing -/// state changes. By grouping resources together into binding arrays, Bevy -/// doesn't have to modify GPU state as often, decreasing API and driver -/// overhead. +/// supplies resources (textures, and samplers) to the shader. When bindless +/// resources are enabled, and the current platform supports them, Bevy will +/// allocate textures, and samplers into *binding arrays*, separated based on +/// type and will supply your shader with indices into those arrays. +/// * Bindless textures and samplers are placed into the appropriate global +/// array defined in `bevy_render::bindless` (`bindless.wgsl`). +/// * Bevy doesn't currently support bindless buffers, except for those created +/// with the `uniform(BINDLESS_INDEX, ConvertedShaderType, +/// binding_array(BINDING_INDEX))` attribute. If you need to include a buffer in +/// your object, and you can't create the data in that buffer with the `uniform` +/// attribute, consider a non-bindless object instead. /// * If bindless mode is enabled, the `BINDLESS` definition will be /// available. Because not all platforms support bindless resources, you /// should check for the presence of this definition via `#ifdef` and fall /// back to standard bindings if it isn't present. -/// * See the `shaders/shader_material_bindless` example for an example of -/// how to use bindless mode. +/// * By default, in bindless mode, binding 0 becomes the *bindless index +/// table*, which is an array of structures, each of which contains as many +/// fields of type `u32` as the highest binding number in the structure +/// annotated with `#[derive(AsBindGroup)]`. Again by default, the *i*th field +/// of the bindless index table contains the index of the resource with binding +/// *i* within the appropriate binding array. +/// * In the case of materials, the index of the applicable table within the +/// bindless index table list corresponding to the mesh currently being drawn +/// can be retrieved with +/// `mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu`. +/// * You can limit the size of the bindless slabs to N resources with the +/// `limit(N)` declaration. For example, `#[bindless(limit(16))]` ensures that +/// each slab will have no more than 16 total resources in it. If you don't +/// specify a limit, Bevy automatically picks a reasonable one for the current +/// platform. +/// * The `index_table(range(M..N), binding(B))` declaration allows you to +/// customize the layout of the bindless index table. This is useful for +/// materials that are composed of multiple bind groups, such as +/// `ExtendedMaterial`. In such cases, there will be multiple bindless index +/// tables, so they can't both be assigned to binding 0 or their bindings will +/// conflict. +/// - The `binding(B)` attribute of the `index_table` attribute allows you to +/// customize the binding (`@binding(B)`, in the shader) at which the index +/// table will be bound. 
+/// - The `range(M, N)` attribute of the `index_table` attribute allows you to +/// change the mapping from the field index in the bindless index table to the +/// bindless index. Instead of the field at index $i$ being mapped to the +/// bindless index $i$, with the `range(M, N)` attribute the field at index +/// $i$ in the bindless index table is mapped to the bindless index $i$ + M. +/// The size of the index table will be set to N - M. Note that this may +/// result in the table being too small to contain all the bindless bindings. +/// * The purpose of bindless mode is to improve performance by reducing +/// state changes. By grouping resources together into binding arrays, Bevy +/// doesn't have to modify GPU state as often, decreasing API and driver +/// overhead. +/// * See the `shaders/shader_material_bindless` example for an example of how +/// to use bindless mode. See the `shaders/extended_material_bindless` example +/// for a more exotic example of bindless mode that demonstrates the +/// `index_table` attribute. +/// * The following diagram illustrates how bindless mode works using a subset +/// of `StandardMaterial`: +/// +/// ```text +/// Shader Bindings Sampler Binding Array +/// +----+-----------------------------+ +-----------+-----------+-----+ +/// +---| 0 | material_indices | +->| sampler 0 | sampler 1 | ... | +/// | +----+-----------------------------+ | +-----------+-----------+-----+ +/// | | 1 | bindless_samplers_filtering +--+ ^ +/// | +----+-----------------------------+ +-------------------------------+ +/// | | .. | ... | | +/// | +----+-----------------------------+ Texture Binding Array | +/// | | 5 | bindless_textures_2d +--+ +-----------+-----------+-----+ | +/// | +----+-----------------------------+ +->| texture 0 | texture 1 | ... | | +/// | | .. | ... | +-----------+-----------+-----+ | +/// | +----+-----------------------------+ ^ | +/// | + 10 | material_array +--+ +---------------------------+ | +/// | +----+-----------------------------+ | | | +/// | | Buffer Binding Array | | +/// | | +----------+----------+-----+ | | +/// | +->| buffer 0 | buffer 1 | ... | | | +/// | Material Bindless Indices +----------+----------+-----+ | | +/// | +----+-----------------------------+ ^ | | +/// +-->| 0 | material +----------+ | | +/// +----+-----------------------------+ | | +/// | 1 | base_color_texture +---------------------------------------+ | +/// +----+-----------------------------+ | +/// | 2 | base_color_sampler +-------------------------------------------+ +/// +----+-----------------------------+ +/// | .. | ... | +/// +----+-----------------------------+ +/// ``` /// /// The previous `CoolMaterial` example illustrating "combining multiple field-level uniform attributes with the same binding index" can /// also be equivalently represented with a single struct-level uniform attribute: @@ -364,7 +507,7 @@ pub trait AsBindGroup { /// Note that the *actual* slot count may be different from this value, due /// to platform limitations. For example, if bindless resources aren't /// supported on this platform, the actual slot count will be 1. - fn bindless_slot_count() -> Option { + fn bindless_slot_count() -> Option { None } @@ -451,6 +594,10 @@ pub trait AsBindGroup { ) -> Vec where Self: Sized; + + fn bindless_descriptor() -> Option { + None + } } /// An error that occurs during [`AsBindGroup::as_bind_group`] calls. 
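As a companion to the attribute documentation above, here is a minimal sketch of what a bindless material deriving `AsBindGroup` might look like. `CustomMaterial`, `CustomMaterialUniform`, and the field names are hypothetical; the snippet assumes the attribute grammar documented in this diff (`#[bindless]`, `uniform(..., binding_array(...))`) together with the usual `bevy::render` re-exports. Consult the `shaders/shader_material_bindless` example for the authoritative version.

```rust
use bevy::prelude::*;
use bevy::render::render_resource::{AsBindGroup, ShaderType};

/// Hypothetical plain-old-data type the material is converted into. It must
/// implement `ShaderType` so it can be written into the binding array.
#[derive(Clone, Default, ShaderType)]
struct CustomMaterialUniform {
    color: Vec4,
}

/// Hypothetical material. `#[bindless]` opts the bind group into bindless
/// mode on platforms that support it; the struct-level `uniform` attribute
/// stores the converted uniform data in a binding array at binding 10 and
/// records it at index 0 of the bindless index table, while the texture and
/// sampler fields are placed in the shared binding arrays.
#[derive(Asset, TypePath, AsBindGroup, Clone)]
#[uniform(0, CustomMaterialUniform, binding_array(10))]
#[bindless]
struct CustomMaterial {
    color: Vec4,
    #[texture(1)]
    #[sampler(2)]
    base_color_texture: Option<Handle<Image>>,
}

// The struct-level `uniform` attribute requires `&CustomMaterial` to be
// convertible into the uniform type.
impl From<&CustomMaterial> for CustomMaterialUniform {
    fn from(material: &CustomMaterial) -> Self {
        CustomMaterialUniform {
            color: material.color,
        }
    }
}
```

In the shader, the `BINDLESS` definition mentioned above can be used to branch between indexing into the binding arrays through the bindless index table and reading a plain non-bindless binding.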
@@ -490,15 +637,30 @@ pub struct BindingResources(pub Vec<(u32, OwnedBindingResource)>); pub enum OwnedBindingResource { Buffer(Buffer), TextureView(TextureViewDimension, TextureView), - Sampler(Sampler), + Sampler(SamplerBindingType, Sampler), + Data(OwnedData), } +/// Data that will be copied into a GPU buffer. +/// +/// This corresponds to the `#[data]` attribute in `AsBindGroup`. +#[derive(Debug, Deref, DerefMut)] +pub struct OwnedData(pub Vec); + impl OwnedBindingResource { + /// Creates a [`BindingResource`] reference to this + /// [`OwnedBindingResource`]. + /// + /// Note that this operation panics if passed a + /// [`OwnedBindingResource::Data`], because [`OwnedData`] doesn't itself + /// correspond to any binding and instead requires the + /// `MaterialBindGroupAllocator` to pack it into a buffer. pub fn get_binding(&self) -> BindingResource { match self { OwnedBindingResource::Buffer(buffer) => buffer.as_entire_binding(), OwnedBindingResource::TextureView(_, view) => BindingResource::TextureView(view), - OwnedBindingResource::Sampler(sampler) => BindingResource::Sampler(sampler), + OwnedBindingResource::Sampler(_, sampler) => BindingResource::Sampler(sampler), + OwnedBindingResource::Data(_) => panic!("`OwnedData` has no binding resource"), } } } diff --git a/crates/bevy_render/src/render_resource/bindless.rs b/crates/bevy_render/src/render_resource/bindless.rs new file mode 100644 index 0000000000..64a0fa2c1f --- /dev/null +++ b/crates/bevy_render/src/render_resource/bindless.rs @@ -0,0 +1,344 @@ +//! Types and functions relating to bindless resources. + +use alloc::borrow::Cow; +use core::{ + num::{NonZeroU32, NonZeroU64}, + ops::Range, +}; + +use bevy_derive::{Deref, DerefMut}; +use wgpu::{ + BindGroupLayoutEntry, SamplerBindingType, ShaderStages, TextureSampleType, TextureViewDimension, +}; + +use crate::render_resource::binding_types::storage_buffer_read_only_sized; + +use super::binding_types::{ + sampler, texture_1d, texture_2d, texture_2d_array, texture_3d, texture_cube, texture_cube_array, +}; + +/// The default value for the number of resources that can be stored in a slab +/// on this platform. +/// +/// See the documentation for [`BindlessSlabResourceLimit`] for more +/// information. +#[cfg(any(target_os = "macos", target_os = "ios"))] +pub const AUTO_BINDLESS_SLAB_RESOURCE_LIMIT: u32 = 64; +/// The default value for the number of resources that can be stored in a slab +/// on this platform. +/// +/// See the documentation for [`BindlessSlabResourceLimit`] for more +/// information. +#[cfg(not(any(target_os = "macos", target_os = "ios")))] +pub const AUTO_BINDLESS_SLAB_RESOURCE_LIMIT: u32 = 2048; + +/// The binding numbers for the built-in binding arrays of each bindless +/// resource type. +/// +/// In the case of materials, the material allocator manages these binding +/// arrays. +/// +/// `bindless.wgsl` contains declarations of these arrays for use in your +/// shaders. If you change these, make sure to update that file as well. 
+pub static BINDING_NUMBERS: [(BindlessResourceType, BindingNumber); 9] = [ + (BindlessResourceType::SamplerFiltering, BindingNumber(1)), + (BindlessResourceType::SamplerNonFiltering, BindingNumber(2)), + (BindlessResourceType::SamplerComparison, BindingNumber(3)), + (BindlessResourceType::Texture1d, BindingNumber(4)), + (BindlessResourceType::Texture2d, BindingNumber(5)), + (BindlessResourceType::Texture2dArray, BindingNumber(6)), + (BindlessResourceType::Texture3d, BindingNumber(7)), + (BindlessResourceType::TextureCube, BindingNumber(8)), + (BindlessResourceType::TextureCubeArray, BindingNumber(9)), +]; + +/// The maximum number of resources that can be stored in a slab. +/// +/// This limit primarily exists in order to work around `wgpu` performance +/// problems involving large numbers of bindless resources. Also, some +/// platforms, such as Metal, currently enforce limits on the number of +/// resources in use. +/// +/// This corresponds to `LIMIT` in the `#[bindless(LIMIT)]` attribute when +/// deriving [`crate::render_resource::AsBindGroup`]. +#[derive(Clone, Copy, Default, PartialEq, Debug)] +pub enum BindlessSlabResourceLimit { + /// Allows the renderer to choose a reasonable value for the resource limit + /// based on the platform. + /// + /// This value has been tuned, so you should default to this value unless + /// you have special platform-specific considerations that prevent you from + /// using it. + #[default] + Auto, + + /// A custom value for the resource limit. + /// + /// Bevy will allocate no more than this number of resources in a slab, + /// unless exceeding this value is necessary in order to allocate at all + /// (i.e. unless the number of bindless resources in your bind group exceeds + /// this value), in which case Bevy can exceed it. + Custom(u32), +} + +/// Information about the bindless resources in this object. +/// +/// The material bind group allocator uses this descriptor in order to create +/// and maintain bind groups. The fields within this bindless descriptor are +/// [`Cow`]s in order to support both the common case in which the fields are +/// simply `static` constants and the more unusual case in which the fields are +/// dynamically generated efficiently. An example of the latter case is +/// `ExtendedMaterial`, which needs to assemble a bindless descriptor from those +/// of the base material and the material extension at runtime. +/// +/// This structure will only be present if this object is bindless. +pub struct BindlessDescriptor { + /// The bindless resource types that this object uses, in order of bindless + /// index. + /// + /// The resource assigned to binding index 0 will be at index 0, the + /// resource assigned to binding index will be at index 1 in this array, and + /// so on. Unused binding indices are set to [`BindlessResourceType::None`]. + pub resources: Cow<'static, [BindlessResourceType]>, + /// The [`BindlessBufferDescriptor`] for each bindless buffer that this + /// object uses. + /// + /// The order of this array is irrelevant. + pub buffers: Cow<'static, [BindlessBufferDescriptor]>, + /// The [`BindlessIndexTableDescriptor`]s describing each bindless index + /// table. + /// + /// This list must be sorted by the first bindless index. + pub index_tables: Cow<'static, [BindlessIndexTableDescriptor]>, +} + +/// The type of potentially-bindless resource. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum BindlessResourceType { + /// No bindless resource. 
+ /// + /// This is used as a placeholder to fill holes in the + /// [`BindlessDescriptor::resources`] list. + None, + /// A storage buffer. + Buffer, + /// A filtering sampler. + SamplerFiltering, + /// A non-filtering sampler (nearest neighbor). + SamplerNonFiltering, + /// A comparison sampler (typically used for shadow maps). + SamplerComparison, + /// A 1D texture. + Texture1d, + /// A 2D texture. + Texture2d, + /// A 2D texture array. + /// + /// Note that this differs from a binding array. 2D texture arrays must all + /// have the same size and format. + Texture2dArray, + /// A 3D texture. + Texture3d, + /// A cubemap texture. + TextureCube, + /// A cubemap texture array. + /// + /// Note that this differs from a binding array. Cubemap texture arrays must + /// all have the same size and format. + TextureCubeArray, + /// Multiple instances of plain old data concatenated into a single buffer. + /// + /// This corresponds to the `#[data]` declaration in + /// [`crate::render_resource::AsBindGroup`]. + /// + /// Note that this resource doesn't itself map to a GPU-level binding + /// resource and instead depends on the `MaterialBindGroupAllocator` to + /// create a binding resource for it. + DataBuffer, +} + +/// Describes a bindless buffer. +/// +/// Unlike samplers and textures, each buffer in a bind group gets its own +/// unique bind group entry. That is, there isn't any `bindless_buffers` binding +/// array to go along with `bindless_textures_2d`, +/// `bindless_samplers_filtering`, etc. Therefore, this descriptor contains two +/// indices: the *binding number* and the *bindless index*. The binding number +/// is the `@binding` number used in the shader, while the bindless index is the +/// index of the buffer in the bindless index table (which is itself +/// conventionally bound to binding number 0). +/// +/// When declaring the buffer in a derived implementation +/// [`crate::render_resource::AsBindGroup`] with syntax like +/// `#[uniform(BINDLESS_INDEX, StandardMaterialUniform, +/// bindless(BINDING_NUMBER)]`, the bindless index is `BINDLESS_INDEX`, and the +/// binding number is `BINDING_NUMBER`. Note the order. +#[derive(Clone, Copy, Debug)] +pub struct BindlessBufferDescriptor { + /// The actual binding number of the buffer. + /// + /// This is declared with `@binding` in WGSL. When deriving + /// [`crate::render_resource::AsBindGroup`], this is the `BINDING_NUMBER` in + /// `#[uniform(BINDLESS_INDEX, StandardMaterialUniform, + /// bindless(BINDING_NUMBER)]`. + pub binding_number: BindingNumber, + /// The index of the buffer in the bindless index table. + /// + /// In the shader, this is the index into the table bound to binding 0. When + /// deriving [`crate::render_resource::AsBindGroup`], this is the + /// `BINDLESS_INDEX` in `#[uniform(BINDLESS_INDEX, StandardMaterialUniform, + /// bindless(BINDING_NUMBER)]`. + pub bindless_index: BindlessIndex, + /// The size of the buffer in bytes, if known. + pub size: Option, +} + +/// Describes the layout of the bindless index table, which maps bindless +/// indices to indices within the binding arrays. +#[derive(Clone)] +pub struct BindlessIndexTableDescriptor { + /// The range of bindless indices that this descriptor covers. + pub indices: Range, + /// The binding at which the index table itself will be bound. + /// + /// By default, this is binding 0, but it can be changed with the + /// `#[bindless(index_table(binding(B)))]` attribute. 
+ pub binding_number: BindingNumber, +} + +/// The index of the actual binding in the bind group. +/// +/// This is the value specified in WGSL as `@binding`. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Deref, DerefMut)] +pub struct BindingNumber(pub u32); + +/// The index in the bindless index table. +/// +/// This table is conventionally bound to binding number 0. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Hash, Debug, Deref, DerefMut)] +pub struct BindlessIndex(pub u32); + +/// Creates the bind group layout entries common to all shaders that use +/// bindless bind groups. +/// +/// `bindless_resource_count` specifies the total number of bindless resources. +/// `bindless_slab_resource_limit` specifies the resolved +/// [`BindlessSlabResourceLimit`] value. +pub fn create_bindless_bind_group_layout_entries( + bindless_index_table_length: u32, + bindless_slab_resource_limit: u32, + bindless_index_table_binding_number: BindingNumber, +) -> Vec { + let bindless_slab_resource_limit = + NonZeroU32::new(bindless_slab_resource_limit).expect("Bindless slot count must be nonzero"); + + // The maximum size of a binding array is the + // `bindless_slab_resource_limit`, which would occur if all of the bindless + // resources were of the same type. So we create our binding arrays with + // that size. + + vec![ + // Start with the bindless index table, bound to binding number 0. + storage_buffer_read_only_sized( + false, + NonZeroU64::new(bindless_index_table_length as u64 * size_of::() as u64), + ) + .build(*bindless_index_table_binding_number, ShaderStages::all()), + // Continue with the common bindless resource arrays. + sampler(SamplerBindingType::Filtering) + .count(bindless_slab_resource_limit) + .build(1, ShaderStages::all()), + sampler(SamplerBindingType::NonFiltering) + .count(bindless_slab_resource_limit) + .build(2, ShaderStages::all()), + sampler(SamplerBindingType::Comparison) + .count(bindless_slab_resource_limit) + .build(3, ShaderStages::all()), + texture_1d(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(4, ShaderStages::all()), + texture_2d(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(5, ShaderStages::all()), + texture_2d_array(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(6, ShaderStages::all()), + texture_3d(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(7, ShaderStages::all()), + texture_cube(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(8, ShaderStages::all()), + texture_cube_array(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(9, ShaderStages::all()), + ] +} + +impl BindlessSlabResourceLimit { + /// Determines the actual bindless slab resource limit on this platform. + pub fn resolve(&self) -> u32 { + match *self { + BindlessSlabResourceLimit::Auto => AUTO_BINDLESS_SLAB_RESOURCE_LIMIT, + BindlessSlabResourceLimit::Custom(limit) => limit, + } + } +} + +impl BindlessResourceType { + /// Returns the binding number for the common array of this resource type. + /// + /// For example, if you pass `BindlessResourceType::Texture2d`, this will + /// return 5, in order to match the `@group(2) @binding(5) var + /// bindless_textures_2d: binding_array>` declaration in + /// `bindless.wgsl`. + /// + /// Not all resource types have fixed binding numbers. 
If you call + /// [`Self::binding_number`] on such a resource type, it returns `None`. + /// + /// Note that this returns a static reference to the binding number, not the + /// binding number itself. This is to conform to an idiosyncratic API in + /// `wgpu` whereby binding numbers for binding arrays are taken by `&u32` + /// *reference*, not by `u32` value. + pub fn binding_number(&self) -> Option<&'static BindingNumber> { + match BINDING_NUMBERS.binary_search_by_key(self, |(key, _)| *key) { + Ok(binding_number) => Some(&BINDING_NUMBERS[binding_number].1), + Err(_) => None, + } + } +} + +impl From for BindlessResourceType { + fn from(texture_view_dimension: TextureViewDimension) -> Self { + match texture_view_dimension { + TextureViewDimension::D1 => BindlessResourceType::Texture1d, + TextureViewDimension::D2 => BindlessResourceType::Texture2d, + TextureViewDimension::D2Array => BindlessResourceType::Texture2dArray, + TextureViewDimension::Cube => BindlessResourceType::TextureCube, + TextureViewDimension::CubeArray => BindlessResourceType::TextureCubeArray, + TextureViewDimension::D3 => BindlessResourceType::Texture3d, + } + } +} + +impl From for BindlessResourceType { + fn from(sampler_binding_type: SamplerBindingType) -> Self { + match sampler_binding_type { + SamplerBindingType::Filtering => BindlessResourceType::SamplerFiltering, + SamplerBindingType::NonFiltering => BindlessResourceType::SamplerNonFiltering, + SamplerBindingType::Comparison => BindlessResourceType::SamplerComparison, + } + } +} + +impl From for BindlessIndex { + fn from(value: u32) -> Self { + Self(value) + } +} + +impl From for BindingNumber { + fn from(value: u32) -> Self { + Self(value) + } +} diff --git a/crates/bevy_render/src/render_resource/buffer_vec.rs b/crates/bevy_render/src/render_resource/buffer_vec.rs index 0feaa84878..4e6c787fba 100644 --- a/crates/bevy_render/src/render_resource/buffer_vec.rs +++ b/crates/bevy_render/src/render_resource/buffer_vec.rs @@ -103,6 +103,11 @@ impl RawBufferVec { self.values.append(&mut other.values); } + /// Returns the value at the given index. + pub fn get(&self, index: u32) -> Option<&T> { + self.values.get(index as usize) + } + /// Sets the value at the given index. /// /// The index must be less than [`RawBufferVec::len`]. @@ -311,7 +316,7 @@ where // TODO: Consider using unsafe code to push uninitialized, to prevent // the zeroing. It shows up in profiles. - self.data.extend(iter::repeat(0).take(element_size)); + self.data.extend(iter::repeat_n(0, element_size)); // Take a slice of the new data for `write_into` to use. This is // important: it hoists the bounds check up here so that the compiler @@ -451,8 +456,14 @@ where /// Reserves space for one more element in the buffer and returns its index. pub fn add(&mut self) -> usize { + self.add_multiple(1) + } + + /// Reserves space for the given number of elements in the buffer and + /// returns the index of the first one. 
+ pub fn add_multiple(&mut self, count: usize) -> usize { let index = self.len; - self.len += 1; + self.len += count; index } diff --git a/crates/bevy_render/src/render_resource/mod.rs b/crates/bevy_render/src/render_resource/mod.rs index 3d9d9c6010..b777d96290 100644 --- a/crates/bevy_render/src/render_resource/mod.rs +++ b/crates/bevy_render/src/render_resource/mod.rs @@ -3,6 +3,7 @@ mod bind_group; mod bind_group_entries; mod bind_group_layout; mod bind_group_layout_entries; +mod bindless; mod buffer; mod buffer_vec; mod gpu_array_buffer; @@ -19,6 +20,7 @@ pub use bind_group::*; pub use bind_group_entries::*; pub use bind_group_layout::*; pub use bind_group_layout_entries::*; +pub use bindless::*; pub use buffer::*; pub use buffer_vec::*; pub use gpu_array_buffer::*; @@ -49,13 +51,14 @@ pub use wgpu::{ PrimitiveState, PrimitiveTopology, PushConstantRange, RenderPassColorAttachment, RenderPassDepthStencilAttachment, RenderPassDescriptor, RenderPipelineDescriptor as RawRenderPipelineDescriptor, Sampler as WgpuSampler, - SamplerBindingType, SamplerDescriptor, ShaderModule, ShaderModuleDescriptor, ShaderSource, - ShaderStages, StencilFaceState, StencilOperation, StencilState, StorageTextureAccess, StoreOp, - TexelCopyBufferInfo, TexelCopyBufferLayout, TexelCopyTextureInfo, TextureAspect, - TextureDescriptor, TextureDimension, TextureFormat, TextureSampleType, TextureUsages, - TextureView as WgpuTextureView, TextureViewDescriptor, TextureViewDimension, VertexAttribute, - VertexBufferLayout as RawVertexBufferLayout, VertexFormat, VertexState as RawVertexState, - VertexStepMode, COPY_BUFFER_ALIGNMENT, + SamplerBindingType, SamplerBindingType as WgpuSamplerBindingType, SamplerDescriptor, + ShaderModule, ShaderModuleDescriptor, ShaderSource, ShaderStages, StencilFaceState, + StencilOperation, StencilState, StorageTextureAccess, StoreOp, TexelCopyBufferInfo, + TexelCopyBufferLayout, TexelCopyTextureInfo, TextureAspect, TextureDescriptor, + TextureDimension, TextureFormat, TextureFormatFeatureFlags, TextureFormatFeatures, + TextureSampleType, TextureUsages, TextureView as WgpuTextureView, TextureViewDescriptor, + TextureViewDimension, VertexAttribute, VertexBufferLayout as RawVertexBufferLayout, + VertexFormat, VertexState as RawVertexState, VertexStepMode, COPY_BUFFER_ALIGNMENT, }; pub use crate::mesh::VertexBufferLayout; diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 5715f1f55b..653ae70b1c 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -11,7 +11,7 @@ use bevy_ecs::{ resource::Resource, system::{Res, ResMut}, }; -use bevy_platform_support::collections::{hash_map::EntryRef, HashMap, HashSet}; +use bevy_platform::collections::{hash_map::EntryRef, HashMap, HashSet}; use bevy_tasks::Task; use bevy_utils::default; use core::{future::Future, hash::Hash, mem, ops::Deref}; @@ -127,6 +127,8 @@ struct ShaderData { struct ShaderCache { data: HashMap, ShaderData>, + #[cfg(feature = "shader_format_wesl")] + asset_paths: HashMap>, shaders: HashMap, Shader>, import_path_shaders: HashMap>, waiting_on_import: HashMap>>, @@ -179,6 +181,8 @@ impl ShaderCache { Self { composer, data: Default::default(), + #[cfg(feature = "shader_format_wesl")] + asset_paths: Default::default(), shaders: Default::default(), import_path_shaders: Default::default(), waiting_on_import: Default::default(), @@ -223,6 +227,7 @@ impl ShaderCache { .shaders .get(&id) 
.ok_or(PipelineCacheError::ShaderNotLoaded(id))?; + let data = self.data.entry(id).or_default(); let n_asset_imports = shader .imports() @@ -251,7 +256,7 @@ impl ShaderCache { shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into()); } - if cfg!(feature = "ios_simulator") { + if cfg!(target_abi = "sim") { shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into()); } @@ -267,6 +272,44 @@ impl ShaderCache { let shader_source = match &shader.source { #[cfg(feature = "shader_format_spirv")] Source::SpirV(data) => make_spirv(data), + #[cfg(feature = "shader_format_wesl")] + Source::Wesl(_) => { + if let ShaderImport::AssetPath(path) = shader.import_path() { + let shader_resolver = + ShaderResolver::new(&self.asset_paths, &self.shaders); + let module_path = wesl::syntax::ModulePath::from_path(path); + let mut compiler_options = wesl::CompileOptions { + imports: true, + condcomp: true, + lower: true, + ..default() + }; + + for shader_def in shader_defs { + match shader_def { + ShaderDefVal::Bool(key, value) => { + compiler_options.features.insert(key.clone(), value); + } + _ => debug!( + "ShaderDefVal::Int and ShaderDefVal::UInt are not supported in wesl", + ), + } + } + + let compiled = wesl::compile( + &module_path, + &shader_resolver, + &wesl::EscapeMangler, + &compiler_options, + ) + .unwrap(); + + let naga = naga::front::wgsl::parse_str(&compiled.to_string()).unwrap(); + ShaderSource::Naga(Cow::Owned(naga)) + } else { + panic!("Wesl shaders must be imported from a file"); + } + } #[cfg(not(feature = "shader_format_spirv"))] Source::SpirV(_) => { unimplemented!( @@ -306,7 +349,28 @@ impl ShaderCache { }, )?; - ShaderSource::Naga(Cow::Owned(naga)) + #[cfg(not(feature = "decoupled_naga"))] + { + ShaderSource::Naga(Cow::Owned(naga)) + } + + #[cfg(feature = "decoupled_naga")] + { + let mut validator = naga::valid::Validator::new( + naga::valid::ValidationFlags::all(), + self.composer.capabilities, + ); + let module_info = validator.validate(&naga).unwrap(); + let wgsl = Cow::Owned( + naga::back::wgsl::write_string( + &naga, + &module_info, + naga::back::wgsl::WriterFlags::empty(), + ) + .unwrap(), + ); + ShaderSource::Wgsl(wgsl) + } } }; @@ -318,7 +382,19 @@ impl ShaderCache { render_device .wgpu_device() .push_error_scope(wgpu::ErrorFilter::Validation); - let shader_module = render_device.create_shader_module(module_descriptor); + + let shader_module = match shader.validate_shader { + ValidateShader::Enabled => { + render_device.create_and_validate_shader_module(module_descriptor) + } + // SAFETY: we are interfacing with shader code, which may contain undefined behavior, + // such as indexing out of bounds. + // The checks required are prohibitively expensive and a poor default for game engines. + ValidateShader::Disabled => unsafe { + render_device.create_shader_module(module_descriptor) + }, + }; + let error = render_device.wgpu_device().pop_error_scope(); // `now_or_never` will return Some if the future is ready and None otherwise. 
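The match on `shader.validate_shader` above decides, per shader asset, whether the module is created through the validating path or the unchecked one. Below is a minimal sketch of how application code might opt an untrusted, user-supplied shader into validation; the helper name and the idea of a user-provided WGSL string are illustrative, and only the `Shader::from_wgsl` constructor and the `validate_shader` field introduced in this diff are assumed.

```rust
use bevy::render::render_resource::{Shader, ValidateShader};

/// Hypothetical helper that wraps user-supplied WGSL (for example from a
/// shader playground) in a `Shader` asset with runtime validation enabled.
/// Shaders bundled with the application can keep the default
/// `ValidateShader::Disabled` and avoid the extra bounds checks.
fn untrusted_shader(source: String, virtual_path: String) -> Shader {
    let mut shader = Shader::from_wgsl(source, virtual_path);
    shader.validate_shader = ValidateShader::Enabled;
    shader
}
```

When the pipeline cache compiles such a shader, the `ValidateShader::Enabled` arm routes it through `create_and_validate_shader_module`, trading some performance for protection against out-of-bounds accesses in untrusted code.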
@@ -386,6 +462,13 @@ impl ShaderCache { } } + #[cfg(feature = "shader_format_wesl")] + if let Source::Wesl(_) = shader.source { + if let ShaderImport::AssetPath(path) = shader.import_path() { + self.asset_paths + .insert(wesl::syntax::ModulePath::from_path(path), id); + } + } self.shaders.insert(id, shader); pipelines_to_queue } @@ -400,6 +483,40 @@ impl ShaderCache { } } +#[cfg(feature = "shader_format_wesl")] +pub struct ShaderResolver<'a> { + asset_paths: &'a HashMap>, + shaders: &'a HashMap, Shader>, +} + +#[cfg(feature = "shader_format_wesl")] +impl<'a> ShaderResolver<'a> { + pub fn new( + asset_paths: &'a HashMap>, + shaders: &'a HashMap, Shader>, + ) -> Self { + Self { + asset_paths, + shaders, + } + } +} + +#[cfg(feature = "shader_format_wesl")] +impl<'a> wesl::Resolver for ShaderResolver<'a> { + fn resolve_source( + &self, + module_path: &wesl::syntax::ModulePath, + ) -> Result, wesl::ResolveError> { + let asset_id = self.asset_paths.get(module_path).ok_or_else(|| { + wesl::ResolveError::ModuleNotFound(module_path.clone(), "Invalid asset id".to_string()) + })?; + + let shader = self.shaders.get(asset_id).unwrap(); + Ok(Cow::Borrowed(shader.source.as_str())) + } +} + type LayoutCacheKey = (Vec, Vec); #[derive(Default)] struct LayoutCache { @@ -491,7 +608,10 @@ impl PipelineCache { /// See [`PipelineCache::queue_render_pipeline()`]. #[inline] pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState { - &self.pipelines[id.0].state + // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines` + self.pipelines + .get(id.0) + .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state) } /// Get the state of a cached compute pipeline. @@ -499,12 +619,18 @@ impl PipelineCache { /// See [`PipelineCache::queue_compute_pipeline()`]. #[inline] pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState { - &self.pipelines[id.0].state + // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines` + self.pipelines + .get(id.0) + .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state) } /// Get the render pipeline descriptor a cached render pipeline was inserted from. /// /// See [`PipelineCache::queue_render_pipeline()`]. + /// + /// **Note**: Be careful calling this method. It will panic if called with a pipeline that + /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`]. #[inline] pub fn get_render_pipeline_descriptor( &self, @@ -519,6 +645,9 @@ impl PipelineCache { /// Get the compute pipeline descriptor a cached render pipeline was inserted from. /// /// See [`PipelineCache::queue_compute_pipeline()`]. + /// + /// **Note**: Be careful calling this method. It will panic if called with a pipeline that + /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`]. 
#[inline] pub fn get_compute_pipeline_descriptor( &self, @@ -540,7 +669,7 @@ impl PipelineCache { #[inline] pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> { if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) = - &self.pipelines[id.0].state + &self.pipelines.get(id.0)?.state { Some(pipeline) } else { @@ -574,7 +703,7 @@ impl PipelineCache { #[inline] pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> { if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) = - &self.pipelines[id.0].state + &self.pipelines.get(id.0)?.state { Some(pipeline) } else { @@ -870,16 +999,14 @@ impl PipelineCache { }; } - CachedPipelineState::Creating(ref mut task) => { - match bevy_tasks::futures::check_ready(task) { - Some(Ok(pipeline)) => { - cached_pipeline.state = CachedPipelineState::Ok(pipeline); - return; - } - Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err), - _ => (), + CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) { + Some(Ok(pipeline)) => { + cached_pipeline.state = CachedPipelineState::Ok(pipeline); + return; } - } + Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err), + _ => (), + }, CachedPipelineState::Err(err) => match err { // Retry diff --git a/crates/bevy_render/src/render_resource/pipeline_specializer.rs b/crates/bevy_render/src/render_resource/pipeline_specializer.rs index 67f737a6a4..e017242ea0 100644 --- a/crates/bevy_render/src/render_resource/pipeline_specializer.rs +++ b/crates/bevy_render/src/render_resource/pipeline_specializer.rs @@ -6,7 +6,7 @@ use crate::{ }, }; use bevy_ecs::resource::Resource; -use bevy_platform_support::{ +use bevy_platform::{ collections::{ hash_map::{Entry, RawEntryMut, VacantEntry}, HashMap, diff --git a/crates/bevy_render/src/render_resource/shader.rs b/crates/bevy_render/src/render_resource/shader.rs index 36f71ed3bf..005fb07c05 100644 --- a/crates/bevy_render/src/render_resource/shader.rs +++ b/crates/bevy_render/src/render_resource/shader.rs @@ -21,6 +21,30 @@ pub enum ShaderReflectError { #[error(transparent)] Validation(#[from] naga::WithSpan), } + +/// Describes whether or not to perform runtime checks on shaders. +/// Runtime checks can be enabled for safety at the cost of speed. +/// By default no runtime checks will be performed. +/// +/// # Panics +/// Because no runtime checks are performed for spirv, +/// enabling `ValidateShader` for spirv will cause a panic +#[derive(Clone, Debug, Default)] +pub enum ValidateShader { + #[default] + /// No runtime checks for soundness (e.g. bound checking) are performed. + /// + /// This is suitable for trusted shaders, written by your program or dependencies you trust. + Disabled, + /// Enable's runtime checks for soundness (e.g. bound checking). + /// + /// While this can have a meaningful impact on performance, + /// this setting should *always* be enabled when loading untrusted shaders. + /// This might occur if you are creating a shader playground, running user-generated shaders + /// (as in `VRChat`), or writing a web browser in Bevy. + Enabled, +} + /// A shader, as defined by its [`ShaderSource`](wgpu::ShaderSource) and [`ShaderStage`](naga::ShaderStage) /// This is an "unprocessed" shader. It can contain preprocessor directives. 
#[derive(Asset, TypePath, Debug, Clone)] @@ -36,6 +60,10 @@ pub struct Shader { // we must store strong handles to our dependencies to stop them // from being immediately dropped if we are the only user. pub file_dependencies: Vec>, + /// Enable or disable runtime shader validation, trading safety against speed. + /// + /// Please read the [`ValidateShader`] docs for a discussion of the tradeoffs involved. + pub validate_shader: ValidateShader, } impl Shader { @@ -78,6 +106,7 @@ impl Shader { additional_imports: Default::default(), shader_defs: Default::default(), file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, } } @@ -108,6 +137,7 @@ impl Shader { additional_imports: Default::default(), shader_defs: Default::default(), file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, } } @@ -121,6 +151,43 @@ impl Shader { additional_imports: Default::default(), shader_defs: Default::default(), file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, + } + } + + #[cfg(feature = "shader_format_wesl")] + pub fn from_wesl(source: impl Into>, path: impl Into) -> Shader { + let source = source.into(); + let path = path.into(); + let (import_path, imports) = Shader::preprocess(&source, &path); + + match import_path { + ShaderImport::AssetPath(asset_path) => { + // Create the shader import path - always starting with "/" + let shader_path = std::path::Path::new("/").join(&asset_path); + + // Convert to a string with forward slashes and without extension + let import_path_str = shader_path + .with_extension("") + .to_string_lossy() + .replace('\\', "/"); + + let import_path = ShaderImport::AssetPath(import_path_str.to_string()); + + Shader { + path, + imports, + import_path, + source: Source::Wesl(source), + additional_imports: Default::default(), + shader_defs: Default::default(), + file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, + } + } + ShaderImport::Custom(_) => { + panic!("Wesl shaders must be imported from an asset path"); + } } } @@ -192,6 +259,7 @@ impl<'a> From<&'a Shader> for naga_oil::compose::NagaModuleDescriptor<'a> { #[derive(Debug, Clone)] pub enum Source { Wgsl(Cow<'static, str>), + Wesl(Cow<'static, str>), Glsl(Cow<'static, str>, naga::ShaderStage), SpirV(Cow<'static, [u8]>), // TODO: consider the following @@ -202,7 +270,7 @@ pub enum Source { impl Source { pub fn as_str(&self) -> &str { match self { - Source::Wgsl(s) | Source::Glsl(s, _) => s, + Source::Wgsl(s) | Source::Wesl(s) | Source::Glsl(s, _) => s, Source::SpirV(_) => panic!("spirv not yet implemented"), } } @@ -219,6 +287,7 @@ impl From<&Source> for naga_oil::compose::ShaderLanguage { "GLSL is not supported in this configuration; use the feature `shader_format_glsl`" ), Source::SpirV(_) => panic!("spirv not yet implemented"), + Source::Wesl(_) => panic!("wesl not yet implemented"), } } } @@ -238,6 +307,7 @@ impl From<&Source> for naga_oil::compose::ShaderType { "GLSL is not supported in this configuration; use the feature `shader_format_glsl`" ), Source::SpirV(_) => panic!("spirv not yet implemented"), + Source::Wesl(_) => panic!("wesl not yet implemented"), } } } @@ -281,6 +351,8 @@ impl AssetLoader for ShaderLoader { "comp" => { Shader::from_glsl(String::from_utf8(bytes)?, naga::ShaderStage::Compute, path) } + #[cfg(feature = "shader_format_wesl")] + "wesl" => Shader::from_wesl(String::from_utf8(bytes)?, path), _ => panic!("unhandled extension: {ext}"), }; @@ -294,7 +366,7 @@ impl AssetLoader for 
ShaderLoader { } fn extensions(&self) -> &[&str] { - &["spv", "wgsl", "vert", "frag", "comp"] + &["spv", "wgsl", "vert", "frag", "comp", "wesl"] } } diff --git a/crates/bevy_render/src/render_resource/uniform_buffer.rs b/crates/bevy_render/src/render_resource/uniform_buffer.rs index 48c2b4a62a..b7d22972df 100644 --- a/crates/bevy_render/src/render_resource/uniform_buffer.rs +++ b/crates/bevy_render/src/render_resource/uniform_buffer.rs @@ -278,11 +278,11 @@ impl DynamicUniformBuffer { device: &RenderDevice, queue: &'a RenderQueue, ) -> Option> { - let alignment = if cfg!(feature = "ios_simulator") { + let alignment = if cfg!(target_abi = "sim") { // On iOS simulator on silicon macs, metal validation check that the host OS alignment // is respected, but the device reports the correct value for iOS, which is smaller. // Use the larger value. - // See https://github.com/bevyengine/bevy/pull/10178 - remove if it's not needed anymore. + // See https://github.com/gfx-rs/wgpu/issues/7057 - remove if it's not needed anymore. AlignmentValue::new(256) } else { AlignmentValue::new(device.limits().min_uniform_buffer_offset_alignment as u64) diff --git a/crates/bevy_render/src/renderer/graph_runner.rs b/crates/bevy_render/src/renderer/graph_runner.rs index cc03374ea4..39f05ca6a8 100644 --- a/crates/bevy_render/src/renderer/graph_runner.rs +++ b/crates/bevy_render/src/renderer/graph_runner.rs @@ -1,5 +1,5 @@ use bevy_ecs::{prelude::Entity, world::World}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; #[cfg(feature = "trace")] use tracing::info_span; @@ -87,10 +87,10 @@ impl RenderGraphRunner { finalizer(render_context.command_encoder()); let (render_device, mut diagnostics_recorder) = { + let (commands, render_device, diagnostics_recorder) = render_context.finish(); + #[cfg(feature = "trace")] let _span = info_span!("submit_graph_commands").entered(); - - let (commands, render_device, diagnostics_recorder) = render_context.finish(); queue.submit(commands); (render_device, diagnostics_recorder) diff --git a/crates/bevy_render/src/renderer/mod.rs b/crates/bevy_render/src/renderer/mod.rs index 5845234191..1691911c2c 100644 --- a/crates/bevy_render/src/renderer/mod.rs +++ b/crates/bevy_render/src/renderer/mod.rs @@ -18,7 +18,7 @@ use crate::{ }; use alloc::sync::Arc; use bevy_ecs::{prelude::*, system::SystemState}; -use bevy_platform_support::time::Instant; +use bevy_platform::time::Instant; use bevy_time::TimeSender; use wgpu::{ Adapter, AdapterInfo, CommandBuffer, CommandEncoder, DeviceType, Instance, Queue, @@ -391,12 +391,13 @@ impl<'w> RenderContext<'w> { adapter_info: AdapterInfo, diagnostics_recorder: Option, ) -> Self { - // HACK: Parallel command encoding is currently bugged on AMD + Windows + Vulkan with wgpu 0.19.1 - #[cfg(target_os = "windows")] + // HACK: Parallel command encoding is currently bugged on AMD + Windows/Linux + Vulkan + #[cfg(any(target_os = "windows", target_os = "linux"))] let force_serial = adapter_info.driver.contains("AMD") && adapter_info.backend == wgpu::Backend::Vulkan; #[cfg(not(any( target_os = "windows", + target_os = "linux", all(target_arch = "wasm32", target_feature = "atomics") )))] let force_serial = { @@ -421,7 +422,7 @@ impl<'w> RenderContext<'w> { /// Gets the diagnostics recorder, used to track elapsed time and pipeline statistics /// of various render and compute passes. 
- pub fn diagnostic_recorder(&self) -> impl RecordDiagnostics { + pub fn diagnostic_recorder(&self) -> impl RecordDiagnostics + use<> { self.diagnostics_recorder.clone() } @@ -497,6 +498,10 @@ impl<'w> RenderContext<'w> { let mut command_buffers = Vec::with_capacity(self.command_buffer_queue.len()); + #[cfg(feature = "trace")] + let _command_buffer_generation_tasks_span = + info_span!("command_buffer_generation_tasks").entered(); + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] { let mut task_based_command_buffers = ComputeTaskPool::get().scope(|task_pool| { @@ -536,6 +541,9 @@ impl<'w> RenderContext<'w> { } } + #[cfg(feature = "trace")] + drop(_command_buffer_generation_tasks_span); + command_buffers.sort_unstable_by_key(|(i, _)| *i); let mut command_buffers = command_buffers diff --git a/crates/bevy_render/src/renderer/render_device.rs b/crates/bevy_render/src/renderer/render_device.rs index 8cde892f68..d33139745b 100644 --- a/crates/bevy_render/src/renderer/render_device.rs +++ b/crates/bevy_render/src/renderer/render_device.rs @@ -44,8 +44,18 @@ impl RenderDevice { } /// Creates a [`ShaderModule`](wgpu::ShaderModule) from either SPIR-V or WGSL source code. + /// + /// # Safety + /// + /// Creates a shader module with user-customizable runtime checks which allows shaders to + /// perform operations which can lead to undefined behavior like indexing out of bounds, + /// To avoid UB, ensure any unchecked shaders are sound! + /// This method should never be called for user-supplied shaders. #[inline] - pub fn create_shader_module(&self, desc: wgpu::ShaderModuleDescriptor) -> wgpu::ShaderModule { + pub unsafe fn create_shader_module( + &self, + desc: wgpu::ShaderModuleDescriptor, + ) -> wgpu::ShaderModule { #[cfg(feature = "spirv_shader_passthrough")] match &desc.source { wgpu::ShaderSource::SpirV(source) @@ -64,9 +74,36 @@ impl RenderDevice { }) } } + // SAFETY: + // + // This call passes binary data to the backend as-is and can potentially result in a driver crash or bogus behavior. + // No attempt is made to ensure that data is valid SPIR-V. + _ => unsafe { + self.device + .create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked()) + }, + } + #[cfg(not(feature = "spirv_shader_passthrough"))] + // SAFETY: the caller is responsible for upholding the safety requirements + unsafe { + self.device + .create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked()) + } + } + + /// Creates and validates a [`ShaderModule`](wgpu::ShaderModule) from either SPIR-V or WGSL source code. + /// + /// See [`ValidateShader`](bevy_render::render_resource::ValidateShader) for more information on the tradeoffs involved with shader validation. + #[inline] + pub fn create_and_validate_shader_module( + &self, + desc: wgpu::ShaderModuleDescriptor, + ) -> wgpu::ShaderModule { + #[cfg(feature = "spirv_shader_passthrough")] + match &desc.source { + wgpu::ShaderSource::SpirV(_source) => panic!("no safety checks are performed for spirv shaders. use `create_shader_module` instead"), _ => self.device.create_shader_module(desc), } - #[cfg(not(feature = "spirv_shader_passthrough"))] self.device.create_shader_module(desc) } diff --git a/crates/bevy_render/src/storage.rs b/crates/bevy_render/src/storage.rs index 7434f3999f..0046b4e6ac 100644 --- a/crates/bevy_render/src/storage.rs +++ b/crates/bevy_render/src/storage.rs @@ -27,7 +27,7 @@ impl Plugin for StoragePlugin { /// A storage buffer that is prepared as a [`RenderAsset`] and uploaded to the GPU. 
#[derive(Asset, Reflect, Debug, Clone)] #[reflect(opaque)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct ShaderStorageBuffer { /// Optional data used to initialize the buffer. pub data: Option>, diff --git a/crates/bevy_render/src/sync_world.rs b/crates/bevy_render/src/sync_world.rs index 3c9dc57ff8..ce04088333 100644 --- a/crates/bevy_render/src/sync_world.rs +++ b/crates/bevy_render/src/sync_world.rs @@ -1,9 +1,10 @@ use bevy_app::Plugin; use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::{ComponentCloneBehavior, Mutable, StorageType}; use bevy_ecs::entity::EntityHash; use bevy_ecs::{ component::Component, - entity::{Entity, EntityBorrow, TrustedEntityBorrow}, + entity::{ContainsEntity, Entity, EntityEquivalent}, observer::Trigger, query::With, reflect::ReflectComponent, @@ -11,8 +12,8 @@ use bevy_ecs::{ system::{Local, Query, ResMut, SystemState}, world::{Mut, OnAdd, OnRemove, World}, }; -use bevy_platform_support::collections::{HashMap, HashSet}; -use bevy_reflect::Reflect; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// A plugin that synchronizes entities with [`SyncToRenderWorld`] between the main world and the render world. /// @@ -119,14 +120,14 @@ impl Plugin for SyncWorldPlugin { /// [`ExtractComponentPlugin`]: crate::extract_component::ExtractComponentPlugin /// [`SyncComponentPlugin`]: crate::sync_component::SyncComponentPlugin #[derive(Component, Copy, Clone, Debug, Default, Reflect)] -#[reflect[Component]] +#[reflect[Component, Default, Clone]] #[component(storage = "SparseSet")] pub struct SyncToRenderWorld; /// Component added on the main world entities that are synced to the Render World in order to keep track of the corresponding render world entity. /// /// Can also be used as a newtype wrapper for render world entities. -#[derive(Component, Deref, Copy, Clone, Debug, Eq, Hash, PartialEq)] +#[derive(Deref, Copy, Clone, Debug, Eq, Hash, PartialEq)] pub struct RenderEntity(Entity); impl RenderEntity { #[inline] @@ -135,20 +136,30 @@ impl RenderEntity { } } +impl Component for RenderEntity { + const STORAGE_TYPE: StorageType = StorageType::Table; + + type Mutability = Mutable; + + fn clone_behavior() -> ComponentCloneBehavior { + ComponentCloneBehavior::Ignore + } +} + impl From for RenderEntity { fn from(entity: Entity) -> Self { RenderEntity(entity) } } -impl EntityBorrow for RenderEntity { +impl ContainsEntity for RenderEntity { fn entity(&self) -> Entity { self.id() } } // SAFETY: RenderEntity is a newtype around Entity that derives its comparison traits. -unsafe impl TrustedEntityBorrow for RenderEntity {} +unsafe impl EntityEquivalent for RenderEntity {} /// Component added on the render world entities to keep track of the corresponding main world entity. /// @@ -168,14 +179,14 @@ impl From for MainEntity { } } -impl EntityBorrow for MainEntity { +impl ContainsEntity for MainEntity { fn entity(&self) -> Entity { self.id() } } // SAFETY: RenderEntity is a newtype around Entity that derives its comparison traits. -unsafe impl TrustedEntityBorrow for MainEntity {} +unsafe impl EntityEquivalent for MainEntity {} /// A [`HashMap`] pre-configured to use [`EntityHash`] hashing with a [`MainEntity`]. pub type MainEntityHashMap = HashMap; @@ -185,7 +196,7 @@ pub type MainEntityHashSet = HashSet; /// Marker component that indicates that its entity needs to be despawned at the end of the frame. 
#[derive(Component, Copy, Clone, Debug, Default, Reflect)] -#[reflect(Component)] +#[reflect(Component, Default, Clone)] pub struct TemporaryRenderEntity; /// A record enum to what entities with [`SyncToRenderWorld`] have been added or removed. @@ -358,6 +369,7 @@ mod render_entities_world_query_impls { // SAFETY: Component access of Self::ReadOnly is a subset of Self. // Self::ReadOnly matches exactly the same archetypes/tables as Self. unsafe impl QueryData for RenderEntity { + const IS_READ_ONLY: bool = true; type ReadOnly = RenderEntity; type Item<'w> = Entity; @@ -457,6 +469,7 @@ mod render_entities_world_query_impls { // SAFETY: Component access of Self::ReadOnly is a subset of Self. // Self::ReadOnly matches exactly the same archetypes/tables as Self. unsafe impl QueryData for MainEntity { + const IS_READ_ONLY: bool = true; type ReadOnly = MainEntity; type Item<'w> = Entity; @@ -539,7 +552,7 @@ mod tests { // Only one synchronized entity assert!(q.iter(&render_world).count() == 1); - let render_entity = q.get_single(&render_world).unwrap(); + let render_entity = q.single(&render_world).unwrap(); let render_entity_component = main_world.get::(main_entity).unwrap(); assert!(render_entity_component.id() == render_entity); diff --git a/crates/bevy_render/src/texture/fallback_image.rs b/crates/bevy_render/src/texture/fallback_image.rs index 38d94ff5d7..18c83414bd 100644 --- a/crates/bevy_render/src/texture/fallback_image.rs +++ b/crates/bevy_render/src/texture/fallback_image.rs @@ -11,7 +11,7 @@ use bevy_ecs::{ system::SystemParam, }; use bevy_image::{BevyDefault, Image, ImageSampler, TextureFormatPixelInfo}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; /// A [`RenderApp`](crate::RenderApp) resource that contains the default "fallback image", /// which can be used in situations where an image was not explicitly defined. The most common @@ -98,7 +98,7 @@ fn fallback_image_new( RenderAssetUsages::RENDER_WORLD, ) } else { - let mut image = Image::default(); + let mut image = Image::default_uninit(); image.texture_descriptor.dimension = TextureDimension::D2; image.texture_descriptor.size = extents; image.texture_descriptor.format = format; @@ -114,7 +114,7 @@ fn fallback_image_new( render_queue, &image.texture_descriptor, TextureDataOrder::default(), - &image.data, + &image.data.expect("Image has no data"), ) } else { render_device.create_texture(&image.texture_descriptor) diff --git a/crates/bevy_render/src/texture/gpu_image.rs b/crates/bevy_render/src/texture/gpu_image.rs index f1ee1ade7e..551bd3ee02 100644 --- a/crates/bevy_render/src/texture/gpu_image.rs +++ b/crates/bevy_render/src/texture/gpu_image.rs @@ -36,7 +36,7 @@ impl RenderAsset for GpuImage { #[inline] fn byte_len(image: &Self::SourceAsset) -> Option { - Some(image.data.len()) + image.data.as_ref().map(Vec::len) } /// Converts the extracted image into a [`GpuImage`]. @@ -45,13 +45,17 @@ impl RenderAsset for GpuImage { _: AssetId, (render_device, render_queue, default_sampler): &mut SystemParamItem, ) -> Result> { - let texture = render_device.create_texture_with_data( - render_queue, - &image.texture_descriptor, - // TODO: Is this correct? Do we need to use `MipMajor` if it's a ktx2 file? - wgpu::util::TextureDataOrder::default(), - &image.data, - ); + let texture = if let Some(ref data) = image.data { + render_device.create_texture_with_data( + render_queue, + &image.texture_descriptor, + // TODO: Is this correct? Do we need to use `MipMajor` if it's a ktx2 file? 
+ wgpu::util::TextureDataOrder::default(), + data, + ) + } else { + render_device.create_texture(&image.texture_descriptor) + }; let texture_view = texture.create_view( image diff --git a/crates/bevy_render/src/texture/texture_cache.rs b/crates/bevy_render/src/texture/texture_cache.rs index 8a3fb01010..ca1ef9b31b 100644 --- a/crates/bevy_render/src/texture/texture_cache.rs +++ b/crates/bevy_render/src/texture/texture_cache.rs @@ -3,7 +3,7 @@ use crate::{ renderer::RenderDevice, }; use bevy_ecs::{prelude::ResMut, resource::Resource}; -use bevy_platform_support::collections::{hash_map::Entry, HashMap}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use wgpu::{TextureDescriptor, TextureViewDescriptor}; /// The internal representation of a [`CachedTexture`] used to track whether it was recently used diff --git a/crates/bevy_render/src/view/mod.rs b/crates/bevy_render/src/view/mod.rs index b0fab01d01..c392dcaaeb 100644 --- a/crates/bevy_render/src/view/mod.rs +++ b/crates/bevy_render/src/view/mod.rs @@ -2,6 +2,7 @@ pub mod visibility; pub mod window; use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_diagnostic::FrameCount; pub use visibility::*; pub use window::*; @@ -32,7 +33,7 @@ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::prelude::*; use bevy_image::BevyDefault as _; use bevy_math::{mat3, vec2, vec3, Mat3, Mat4, UVec4, Vec2, Vec3, Vec4, Vec4Swizzles}; -use bevy_platform_support::collections::{hash_map::Entry, HashMap}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render_macros::ExtractComponent; use bevy_transform::components::GlobalTransform; @@ -316,7 +317,7 @@ impl ExtractedView { /// `post_saturation` value in [`ColorGradingGlobal`], which is applied after /// tonemapping. #[derive(Component, Reflect, Debug, Default, Clone)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct ColorGrading { /// Filmic color grading values applied to the image as a whole (as opposed /// to individual sections, like shadows and highlights). @@ -345,7 +346,7 @@ pub struct ColorGrading { /// Filmic color grading values applied to the image as a whole (as opposed to /// individual sections, like shadows and highlights). #[derive(Clone, Debug, Reflect)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct ColorGradingGlobal { /// Exposure value (EV) offset, measured in stops. pub exposure: f32, @@ -411,6 +412,7 @@ pub struct ColorGradingUniform { /// A section of color grading values that can be selectively applied to /// shadows, midtones, and highlights. #[derive(Reflect, Debug, Copy, Clone, PartialEq)] +#[reflect(Clone, PartialEq)] pub struct ColorGradingSection { /// Values below 1.0 desaturate, with a value of 0.0 resulting in a grayscale image /// with luminance defined by ITU-R BT.709. @@ -568,6 +570,7 @@ pub struct ViewUniform { pub frustum: [Vec4; 6], pub color_grading: ColorGradingUniform, pub mip_bias: f32, + pub frame_count: u32, } #[derive(Resource)] @@ -613,7 +616,9 @@ pub struct ViewTargetAttachments(HashMap { pub source: &'a TextureView, + pub source_texture: &'a Texture, pub destination: &'a TextureView, + pub destination_texture: &'a Texture, } impl From for ColorGradingUniform { @@ -706,6 +711,9 @@ impl From for ColorGradingUniform { /// /// The vast majority of applications will not need to use this component, as it /// generally reduces rendering performance. 
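`Image::data` is now an `Option<Vec<u8>>`, so `byte_len` and texture creation have to tolerate images with no CPU-side pixel data. A plain-Rust sketch of that pattern (`CpuImage` is a stand-in, not the real `bevy_image::Image`):

```rust
/// Stand-in for an image whose CPU-side pixel data may be absent,
/// mirroring the new `Image::data: Option<Vec<u8>>`.
struct CpuImage {
    data: Option<Vec<u8>>,
}

/// Same shape as the updated `GpuImage::byte_len` above.
fn byte_len(image: &CpuImage) -> Option<usize> {
    image.data.as_ref().map(Vec::len)
}

fn main() {
    let uninitialized = CpuImage { data: None };
    let filled = CpuImage { data: Some(vec![0u8; 16]) };
    assert_eq!(byte_len(&uninitialized), None);
    assert_eq!(byte_len(&filled), Some(16));
}
```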
+/// +/// Note: This component should only be added when initially spawning a camera. Adding +/// or removing after spawn can result in unspecified behavior. #[derive(Component, Default)] pub struct NoIndirectDrawing; @@ -841,13 +849,17 @@ impl ViewTarget { self.main_textures.b.mark_as_cleared(); PostProcessWrite { source: &self.main_textures.a.texture.default_view, + source_texture: &self.main_textures.a.texture.texture, destination: &self.main_textures.b.texture.default_view, + destination_texture: &self.main_textures.b.texture.texture, } } else { self.main_textures.a.mark_as_cleared(); PostProcessWrite { source: &self.main_textures.b.texture.default_view, + source_texture: &self.main_textures.b.texture.texture, destination: &self.main_textures.a.texture.default_view, + destination_texture: &self.main_textures.a.texture.texture, } } } @@ -889,6 +901,7 @@ pub fn prepare_view_uniforms( Option<&TemporalJitter>, Option<&MipBias>, )>, + frame_count: Res, ) { let view_iter = views.iter(); let view_count = view_iter.len(); @@ -942,6 +955,7 @@ pub fn prepare_view_uniforms( frustum, color_grading: extracted_view.color_grading.clone().into(), mip_bias: mip_bias.unwrap_or(&MipBias(0.0)).0, + frame_count: frame_count.0, }), }; @@ -1039,7 +1053,7 @@ pub fn prepare_view_targets( }; let (a, b, sampled, main_texture) = textures - .entry((camera.target.clone(), view.hdr, msaa)) + .entry((camera.target.clone(), texture_usage.0, view.hdr, msaa)) .or_insert_with(|| { let descriptor = TextureDescriptor { label: None, diff --git a/crates/bevy_render/src/view/view.wgsl b/crates/bevy_render/src/view/view.wgsl index ed08599758..317de2eb88 100644 --- a/crates/bevy_render/src/view/view.wgsl +++ b/crates/bevy_render/src/view/view.wgsl @@ -60,4 +60,5 @@ struct View { frustum: array, 6>, color_grading: ColorGrading, mip_bias: f32, + frame_count: u32, }; diff --git a/crates/bevy_render/src/view/visibility/mod.rs b/crates/bevy_render/src/view/visibility/mod.rs index 33c6639266..63c931a8b0 100644 --- a/crates/bevy_render/src/view/visibility/mod.rs +++ b/crates/bevy_render/src/view/visibility/mod.rs @@ -4,7 +4,7 @@ mod render_layers; use core::any::TypeId; use bevy_ecs::component::HookContext; -use bevy_ecs::entity::hash_set::EntityHashSet; +use bevy_ecs::entity::EntityHashSet; use bevy_ecs::world::DeferredWorld; use derive_more::derive::{Deref, DerefMut}; pub use range::*; @@ -34,7 +34,7 @@ use crate::{ /// This is done by the `visibility_propagate_system` which uses the entity hierarchy and /// `Visibility` to set the values of each entity's [`InheritedVisibility`] component. #[derive(Component, Clone, Copy, Reflect, Debug, PartialEq, Eq, Default)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[require(InheritedVisibility, ViewVisibility)] pub enum Visibility { /// An entity with `Visibility::Inherited` will inherit the Visibility of its [`ChildOf`] target. @@ -109,7 +109,7 @@ impl PartialEq<&Visibility> for Visibility { /// /// [`VisibilityPropagate`]: VisibilitySystems::VisibilityPropagate #[derive(Component, Deref, Debug, Default, Clone, Copy, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[component(on_insert = validate_parent_has_component::)] pub struct InheritedVisibility(bool); @@ -152,7 +152,7 @@ impl InheritedVisibility { // Note: This can't be a `ComponentId` because the visibility classes are copied // into the render world, and component IDs are per-world. 
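`ViewUniform` and the matching `View` struct in `view.wgsl` gain a `frame_count` field, populated from the `FrameCount` resource in `prepare_view_uniforms`. A minimal sketch of reading that same resource from an ordinary system, assuming bevy_diagnostic 0.16-dev:

```rust
use bevy_diagnostic::FrameCount;
use bevy_ecs::prelude::*;

// Reads the counter that now also feeds `ViewUniform::frame_count`;
// it advances once per frame.
fn log_frame(frame_count: Res<FrameCount>) {
    println!("frame {}", frame_count.0);
}
```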
#[derive(Clone, Component, Default, Reflect, Deref, DerefMut)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct VisibilityClass(pub SmallVec<[TypeId; 1]>); /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering. @@ -166,7 +166,7 @@ pub struct VisibilityClass(pub SmallVec<[TypeId; 1]>); /// [`VisibilityPropagate`]: VisibilitySystems::VisibilityPropagate /// [`CheckVisibility`]: VisibilitySystems::CheckVisibility #[derive(Component, Deref, Debug, Default, Clone, Copy, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct ViewVisibility(bool); impl ViewVisibility { @@ -204,7 +204,7 @@ impl ViewVisibility { /// It can be used for example: /// - when a [`Mesh`] is updated but its [`Aabb`] is not, which might happen with animations, /// - when using some light effects, like wanting a [`Mesh`] out of the [`Frustum`] -/// to appear in the reflection of a [`Mesh`] within. +/// to appear in the reflection of a [`Mesh`] within. #[derive(Debug, Component, Default, Reflect)] #[reflect(Component, Default, Debug)] pub struct NoFrustumCulling; @@ -219,9 +219,9 @@ pub struct NoFrustumCulling; /// This component is intended to be attached to the same entity as the [`Camera`] and /// the [`Frustum`] defining the view. #[derive(Clone, Component, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct VisibleEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: TypeIdMap>, } @@ -269,9 +269,9 @@ impl VisibleEntities { /// /// This component is extracted from [`VisibleEntities`]. #[derive(Clone, Component, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct RenderVisibleEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: TypeIdMap>, } @@ -325,6 +325,10 @@ pub enum VisibilitySystems { /// the order of systems within this set is irrelevant, as [`check_visibility`] /// assumes that its operations are irreversible during the frame. CheckVisibility, + /// Label for the `mark_newly_hidden_entities_invisible` system, which sets + /// [`ViewVisibility`] to [`ViewVisibility::HIDDEN`] for entities that no + /// view has marked as visible. 
+ MarkNewlyHiddenEntitiesInvisible, } pub struct VisibilityPlugin; @@ -340,6 +344,10 @@ impl Plugin for VisibilityPlugin { .before(CheckVisibility) .after(TransformSystem::TransformPropagate), ) + .configure_sets( + PostUpdate, + MarkNewlyHiddenEntitiesInvisible.after(CheckVisibility), + ) .init_resource::() .add_systems( PostUpdate, @@ -348,6 +356,7 @@ impl Plugin for VisibilityPlugin { (visibility_propagate_system, reset_view_visibility) .in_set(VisibilityPropagate), check_visibility.in_set(CheckVisibility), + mark_newly_hidden_entities_invisible.in_set(MarkNewlyHiddenEntitiesInvisible), ), ); } @@ -396,13 +405,13 @@ fn visibility_propagate_system( mut visibility_query: Query<(&Visibility, &mut InheritedVisibility)>, children_query: Query<&Children, (With, With)>, ) { - for (entity, visibility, parent, children) in &changed { + for (entity, visibility, child_of, children) in &changed { let is_visible = match visibility { Visibility::Visible => true, Visibility::Hidden => false, // fall back to true if no parent is found or parent lacks components - Visibility::Inherited => parent - .and_then(|p| visibility_query.get(p.get()).ok()) + Visibility::Inherited => child_of + .and_then(|c| visibility_query.get(c.parent()).ok()) .is_none_or(|(_, x)| x.get()), }; let (_, mut inherited_visibility) = visibility_query @@ -456,6 +465,10 @@ fn propagate_recursive( } /// Stores all entities that were visible in the previous frame. +/// +/// As systems that check visibility judge entities visible, they remove them +/// from this set. Afterward, the `mark_newly_hidden_entities_invisible` system +/// runs and marks every mesh still remaining in this set as hidden. #[derive(Resource, Default, Deref, DerefMut)] pub struct PreviousVisibleEntities(EntityHashSet); @@ -607,13 +620,23 @@ pub fn check_visibility( } } } +} - // Now whatever previous visible entities are left are entities that were +/// Marks any entities that weren't judged visible this frame as invisible. +/// +/// As visibility-determining systems run, they remove entities that they judge +/// visible from [`PreviousVisibleEntities`]. At the end of visibility +/// determination, all entities that remain in [`PreviousVisibleEntities`] must +/// be invisible. This system goes through those entities and marks them newly +/// invisible (which sets the change flag for them). +fn mark_newly_hidden_entities_invisible( + mut view_visibilities: Query<&mut ViewVisibility>, + mut previous_visible_entities: ResMut, +) { + // Whatever previous visible entities are left are entities that were // visible last frame but just became invisible. 
for entity in previous_visible_entities.drain() { - if let Ok((_, _, mut view_visibility, _, _, _, _, _, _)) = - visible_aabb_query.get_mut(entity) - { + if let Ok(mut view_visibility) = view_visibilities.get_mut(entity) { *view_visibility = ViewVisibility::HIDDEN; } } diff --git a/crates/bevy_render/src/view/visibility/range.rs b/crates/bevy_render/src/view/visibility/range.rs index fe0af5b08a..4c264e0778 100644 --- a/crates/bevy_render/src/view/visibility/range.rs +++ b/crates/bevy_render/src/view/visibility/range.rs @@ -9,19 +9,19 @@ use core::{ use bevy_app::{App, Plugin, PostUpdate}; use bevy_ecs::{ component::Component, - entity::{hash_map::EntityHashMap, Entity}, + entity::{Entity, EntityHashMap}, query::{Changed, With}, reflect::ReflectComponent, removal_detection::RemovedComponents, resource::Resource, - schedule::IntoSystemConfigs as _, - system::{Query, Res, ResMut}, + schedule::IntoScheduleConfigs as _, + system::{Local, Query, Res, ResMut}, }; use bevy_math::{vec4, FloatOrd, Vec4}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::Reflect; use bevy_transform::components::GlobalTransform; -use bevy_utils::prelude::default; +use bevy_utils::{prelude::default, Parallel}; use nonmax::NonMaxU16; use wgpu::{BufferBindingType, BufferUsages}; @@ -114,7 +114,7 @@ impl Plugin for VisibilityRangePlugin { /// `start_margin` of the next lower LOD; this is important for the crossfade /// effect to function properly. #[derive(Component, Clone, PartialEq, Default, Reflect)] -#[reflect(Component, PartialEq, Hash)] +#[reflect(Component, PartialEq, Hash, Clone)] pub struct VisibilityRange { /// The range of distances, in world units, between which this entity will /// smoothly fade into view as the camera zooms out. @@ -385,7 +385,8 @@ impl VisibleEntityRanges { pub fn check_visibility_ranges( mut visible_entity_ranges: ResMut, view_query: Query<(Entity, &GlobalTransform), With>, - mut entity_query: Query<(Entity, &GlobalTransform, Option<&Aabb>, &VisibilityRange)>, + mut par_local: Local>>, + entity_query: Query<(Entity, &GlobalTransform, Option<&Aabb>, &VisibilityRange)>, ) { visible_entity_ranges.clear(); @@ -404,30 +405,34 @@ pub fn check_visibility_ranges( // Check each entity/view pair. Only consider entities with // [`VisibilityRange`] components. - for (entity, entity_transform, maybe_model_aabb, visibility_range) in entity_query.iter_mut() { - let mut visibility = 0; - for (view_index, &(_, view_position)) in views.iter().enumerate() { - // If instructed to use the AABB and the model has one, use its - // center as the model position. Otherwise, use the model's - // translation. - let model_position = match (visibility_range.use_aabb, maybe_model_aabb) { - (true, Some(model_aabb)) => entity_transform - .affine() - .transform_point3a(model_aabb.center), - _ => entity_transform.translation_vec3a(), - }; + entity_query.par_iter().for_each( + |(entity, entity_transform, maybe_model_aabb, visibility_range)| { + let mut visibility = 0; + for (view_index, &(_, view_position)) in views.iter().enumerate() { + // If instructed to use the AABB and the model has one, use its + // center as the model position. Otherwise, use the model's + // translation. 
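The doc comments above describe a two-phase scheme: visibility checks remove every entity they judge visible from `PreviousVisibleEntities`, and `mark_newly_hidden_entities_invisible` then hides whatever is left. A plain-Rust sketch of that bookkeeping, using integers as stand-ins for entities:

```rust
use std::collections::HashSet;

fn main() {
    // Entities visible last frame.
    let mut previously_visible: HashSet<u32> = HashSet::from([1, 2, 3]);
    // Entities the visibility checks see this frame.
    let visible_this_frame = [2, 3, 4];

    // Phase 1: each check removes the entities it can still see.
    for entity in visible_this_frame {
        previously_visible.remove(&entity);
    }

    // Phase 2: anything left was visible last frame but not this one,
    // so it is marked hidden (entity 1 here).
    for newly_hidden in previously_visible.drain() {
        println!("entity {newly_hidden} became hidden");
    }
}
```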
+ let model_position = match (visibility_range.use_aabb, maybe_model_aabb) { + (true, Some(model_aabb)) => entity_transform + .affine() + .transform_point3a(model_aabb.center), + _ => entity_transform.translation_vec3a(), + }; - if visibility_range.is_visible_at_all((view_position - model_position).length()) { - visibility |= 1 << view_index; + if visibility_range.is_visible_at_all((view_position - model_position).length()) { + visibility |= 1 << view_index; + } } - } - // Invisible entities have no entry at all in the hash map. This speeds - // up checks slightly in this common case. - if visibility != 0 { - visible_entity_ranges.entities.insert(entity, visibility); - } - } + // Invisible entities have no entry at all in the hash map. This speeds + // up checks slightly in this common case. + if visibility != 0 { + par_local.borrow_local_mut().push((entity, visibility)); + } + }, + ); + + visible_entity_ranges.entities.extend(par_local.drain()); } /// Extracts all [`VisibilityRange`] components from the main world to the diff --git a/crates/bevy_render/src/view/visibility/render_layers.rs b/crates/bevy_render/src/view/visibility/render_layers.rs index 1932abdf71..a5a58453e8 100644 --- a/crates/bevy_render/src/view/visibility/render_layers.rs +++ b/crates/bevy_render/src/view/visibility/render_layers.rs @@ -20,7 +20,7 @@ pub type Layer = usize; /// /// Entities without this component belong to layer `0`. #[derive(Component, Clone, Reflect, PartialEq, Eq, PartialOrd, Ord)] -#[reflect(Component, Default, PartialEq, Debug)] +#[reflect(Component, Default, PartialEq, Debug, Clone)] pub struct RenderLayers(SmallVec<[u64; INLINE_BLOCKS]>); /// The number of memory blocks stored inline diff --git a/crates/bevy_render/src/view/window/mod.rs b/crates/bevy_render/src/view/window/mod.rs index 14a984ff39..c3fc6e5516 100644 --- a/crates/bevy_render/src/view/window/mod.rs +++ b/crates/bevy_render/src/view/window/mod.rs @@ -4,8 +4,8 @@ use crate::{ Extract, ExtractSchedule, Render, RenderApp, RenderSet, WgpuWrapper, }; use bevy_app::{App, Plugin}; -use bevy_ecs::{entity::hash_map::EntityHashMap, prelude::*}; -use bevy_platform_support::collections::HashSet; +use bevy_ecs::{entity::EntityHashMap, prelude::*}; +use bevy_platform::collections::HashSet; use bevy_utils::default; use bevy_window::{ CompositeAlphaMode, PresentMode, PrimaryWindow, RawHandleWrapper, Window, WindowClosing, @@ -304,9 +304,7 @@ const DEFAULT_DESIRED_MAXIMUM_FRAME_LATENCY: u32 = 2; pub fn create_surfaces( // By accessing a NonSend resource, we tell the scheduler to put this system on the main thread, // which is necessary for some OS's - #[cfg(any(target_os = "macos", target_os = "ios"))] _marker: Option< - NonSend, - >, + #[cfg(any(target_os = "macos", target_os = "ios"))] _marker: bevy_ecs::system::NonSendMarker, windows: Res, mut window_surfaces: ResMut, render_instance: Res, diff --git a/crates/bevy_render/src/view/window/screenshot.rs b/crates/bevy_render/src/view/window/screenshot.rs index d8a309036e..6e223eedaf 100644 --- a/crates/bevy_render/src/view/window/screenshot.rs +++ b/crates/bevy_render/src/view/window/screenshot.rs @@ -20,10 +20,10 @@ use bevy_app::{First, Plugin, Update}; use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - entity::hash_map::EntityHashMap, event::event_update_system, prelude::*, system::SystemState, + entity::EntityHashMap, event::event_update_system, prelude::*, system::SystemState, }; use bevy_image::{Image, TextureFormatPixelInfo}; -use 
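`check_visibility_ranges` now iterates entities in parallel, packing per-view visibility into a `u32` bitmask (bit `i` set when the entity is in range of view `i`) and accumulating results in thread-local `Parallel` buffers that are drained into `VisibleEntityRanges` afterwards. A plain-Rust sketch of the bitmask part, with made-up distances and no parallelism:

```rust
fn main() {
    let view_positions = [0.0_f32, 100.0, 250.0];
    let entity_position = 90.0_f32;
    let max_visible_distance = 50.0_f32;

    let mut visibility: u32 = 0;
    for (view_index, view_position) in view_positions.iter().enumerate() {
        if (view_position - entity_position).abs() <= max_visible_distance {
            visibility |= 1 << view_index;
        }
    }

    // Only view 1 is close enough, so only bit 1 is set.
    assert_eq!(visibility, 0b010);
}
```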
bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bevy_reflect::Reflect; use bevy_tasks::AsyncComputeTaskPool; use bevy_utils::default; diff --git a/crates/bevy_scene/Cargo.toml b/crates/bevy_scene/Cargo.toml index ac263fe734..3bb913c859 100644 --- a/crates/bevy_scene/Cargo.toml +++ b/crates/bevy_scene/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_scene" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides scene functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -14,7 +14,7 @@ serialize = [ "dep:serde", "uuid/serde", "bevy_ecs/serialize", - "bevy_platform_support/serialize", + "bevy_platform/serialize", ] [dependencies] @@ -23,13 +23,11 @@ bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_render = { path = "../bevy_render", version = "0.16.0-dev", optional = true } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", ] } @@ -40,11 +38,12 @@ thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. uuid = { version = "1.13.1", default-features = false, features = ["js"] } [dev-dependencies] postcard = { version = "1.0", features = ["alloc"] } -bincode = "1.3" +bincode = { version = "2.0", features = ["serde"] } rmp-serde = "1.1" [lints] diff --git a/crates/bevy_scene/src/components.rs b/crates/bevy_scene/src/components.rs index 8709c7990f..d4d42c3a1c 100644 --- a/crates/bevy_scene/src/components.rs +++ b/crates/bevy_scene/src/components.rs @@ -1,9 +1,6 @@ use bevy_asset::Handle; use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::{ - component::{require, Component}, - prelude::ReflectComponent, -}; +use bevy_ecs::{component::Component, prelude::ReflectComponent}; use bevy_reflect::{prelude::ReflectDefault, Reflect}; use bevy_transform::components::Transform; use derive_more::derive::From; @@ -16,7 +13,7 @@ use crate::{DynamicScene, Scene}; /// Adding this component will spawn the scene as a child of that entity. /// Once it's spawned, the entity will have a [`SceneInstance`](crate::SceneInstance) component. #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[require(Transform)] #[cfg_attr(feature = "bevy_render", require(Visibility))] pub struct SceneRoot(pub Handle); @@ -24,7 +21,7 @@ pub struct SceneRoot(pub Handle); /// Adding this component will spawn the scene as a child of that entity. /// Once it's spawned, the entity will have a [`SceneInstance`](crate::SceneInstance) component. 
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[require(Transform)] #[cfg_attr(feature = "bevy_render", require(Visibility))] pub struct DynamicSceneRoot(pub Handle); diff --git a/crates/bevy_scene/src/dynamic_scene.rs b/crates/bevy_scene/src/dynamic_scene.rs index be45b54a81..f0cf3960d6 100644 --- a/crates/bevy_scene/src/dynamic_scene.rs +++ b/crates/bevy_scene/src/dynamic_scene.rs @@ -1,18 +1,23 @@ -use crate::{ron, DynamicSceneBuilder, Scene, SceneSpawnError}; +use crate::{DynamicSceneBuilder, Scene, SceneSpawnError}; use bevy_asset::Asset; use bevy_ecs::reflect::{ReflectMapEntities, ReflectResource}; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, Entity, SceneEntityMapper}, + entity::{Entity, EntityHashMap, SceneEntityMapper}, reflect::{AppTypeRegistry, ReflectComponent}, world::World, }; -use bevy_reflect::{PartialReflect, TypePath, TypeRegistry}; +use bevy_reflect::{PartialReflect, TypePath}; + +use crate::reflect_utils::clone_reflect_value; +use bevy_ecs::component::ComponentCloneBehavior; +use bevy_ecs::relationship::RelationshipHookMode; #[cfg(feature = "serialize")] -use crate::serde::SceneSerializer; -use bevy_ecs::component::ComponentCloneBehavior; -#[cfg(feature = "serialize")] -use serde::Serialize; +use { + crate::{ron, serde::SceneSerializer}, + bevy_reflect::TypeRegistry, + serde::Serialize, +}; /// A collection of serializable resources and dynamic entities. /// @@ -86,7 +91,6 @@ impl DynamicScene { // Apply/ add each component to the given entity. for component in &scene_entity.components { - let component = component.clone_value(); let type_info = component.get_represented_type_info().ok_or_else(|| { SceneSpawnError::NoRepresentedType { type_path: component.reflect_type_path().to_string(), @@ -110,10 +114,8 @@ impl DynamicScene { #[expect(unsafe_code, reason = "this is faster")] let component_info = unsafe { world.components().get_info_unchecked(component_id) }; - match component_info.clone_behavior() { - ComponentCloneBehavior::Ignore - | ComponentCloneBehavior::RelationshipTarget(_) => continue, - _ => {} + if *component_info.clone_behavior() == ComponentCloneBehavior::Ignore { + continue; } } @@ -123,6 +125,7 @@ impl DynamicScene { component.as_partial_reflect(), &type_registry, mapper, + RelationshipHookMode::Skip, ); }); } @@ -131,7 +134,6 @@ impl DynamicScene { // Insert resources after all entities have been added to the world. // This ensures the entities are available for the resources to reference during mapping. for resource in &self.resources { - let mut resource = resource.clone_value(); let type_info = resource.get_represented_type_info().ok_or_else(|| { SceneSpawnError::NoRepresentedType { type_path: resource.reflect_type_path().to_string(), @@ -150,15 +152,22 @@ impl DynamicScene { // If this component references entities in the scene, update // them to the entities in the world. 
- if let Some(map_entities) = registration.data::() { + let mut cloned_resource; + let partial_reflect_resource = if let Some(map_entities) = + registration.data::() + { + cloned_resource = clone_reflect_value(resource.as_partial_reflect(), registration); SceneEntityMapper::world_scope(entity_map, world, |_, mapper| { - map_entities.map_entities(resource.as_partial_reflect_mut(), mapper); + map_entities.map_entities(cloned_resource.as_partial_reflect_mut(), mapper); }); - } + cloned_resource.as_partial_reflect() + } else { + resource.as_partial_reflect() + }; // If the world already contains an instance of the given resource // just apply the (possibly) new value, otherwise insert the resource - reflect_resource.apply_or_insert(world, resource.as_partial_reflect(), &type_registry); + reflect_resource.apply_or_insert(world, partial_reflect_resource, &type_registry); } Ok(()) @@ -208,24 +217,24 @@ where mod tests { use bevy_ecs::{ component::Component, - entity::{ - hash_map::EntityHashMap, Entity, EntityMapper, MapEntities, VisitEntities, - VisitEntitiesMut, - }, + entity::{Entity, EntityHashMap, EntityMapper, MapEntities}, hierarchy::ChildOf, reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities, ReflectResource}, resource::Resource, world::World, }; + use bevy_reflect::Reflect; use crate::dynamic_scene::DynamicScene; use crate::dynamic_scene_builder::DynamicSceneBuilder; - #[derive(Resource, Reflect, Debug, VisitEntities, VisitEntitiesMut)] + #[derive(Resource, Reflect, MapEntities, Debug)] #[reflect(Resource, MapEntities)] struct TestResource { + #[entities] entity_a: Entity, + #[entities] entity_b: Entity, } @@ -316,7 +325,7 @@ mod tests { .unwrap() .get::() .unwrap() - .get(), + .parent(), "something about reloading the scene is touching entities with the same scene Ids" ); assert_eq!( @@ -326,7 +335,7 @@ mod tests { .unwrap() .get::() .unwrap() - .get(), + .parent(), "something about reloading the scene is touching components not defined in the scene but on entities defined in the scene" ); assert_eq!( @@ -336,7 +345,7 @@ mod tests { .unwrap() .get::() .expect("something is wrong with this test, and the scene components don't have a parent/child relationship") - .get(), + .parent(), "something is wrong with this test or the code reloading scenes since the relationship between scene entities is broken" ); } @@ -354,7 +363,7 @@ mod tests { struct B(pub Entity); impl MapEntities for B { - fn map_entities(&mut self, entity_mapper: &mut M) { + fn map_entities(&mut self, entity_mapper: &mut E) { self.0 = entity_mapper.get_mapped(self.0); } } diff --git a/crates/bevy_scene/src/dynamic_scene_builder.rs b/crates/bevy_scene/src/dynamic_scene_builder.rs index e2981ed973..5deac09aaa 100644 --- a/crates/bevy_scene/src/dynamic_scene_builder.rs +++ b/crates/bevy_scene/src/dynamic_scene_builder.rs @@ -1,5 +1,6 @@ use core::any::TypeId; +use crate::reflect_utils::clone_reflect_value; use crate::{DynamicEntity, DynamicScene, SceneFilter}; use alloc::collections::BTreeMap; use bevy_ecs::{ @@ -10,7 +11,7 @@ use bevy_ecs::{ resource::Resource, world::World, }; -use bevy_reflect::{PartialReflect, ReflectFromReflect}; +use bevy_reflect::PartialReflect; use bevy_utils::default; /// A [`DynamicScene`] builder, used to build a scene from a [`World`] by extracting some entities and resources. @@ -303,14 +304,8 @@ impl<'w> DynamicSceneBuilder<'w> { .data::()? .reflect(original_entity)?; - // Clone via `FromReflect`. 
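The test resource above drops the removed `VisitEntities`/`VisitEntitiesMut` derives in favor of `#[derive(MapEntities)]` with `#[entities]` on each entity-holding field. A minimal sketch of that derive on a hypothetical type, assuming bevy_ecs 0.16-dev:

```rust
use bevy_ecs::entity::{Entity, MapEntities};

/// Hypothetical component-like struct; the derive generates a
/// `map_entities` impl that remaps the `#[entities]` field.
#[derive(MapEntities, Debug)]
struct Follows {
    #[entities]
    target: Entity,
}
```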
Unlike `PartialReflect::clone_value` this - // retains the original type and `ReflectSerialize` type data which is needed to - // deserialize. - let component = type_registration - .data::() - .and_then(|fr| fr.from_reflect(component.as_partial_reflect())) - .map(PartialReflect::into_partial_reflect) - .unwrap_or_else(|| component.clone_value()); + let component = + clone_reflect_value(component.as_partial_reflect(), type_registration); entry.components.push(component); Some(()) @@ -351,6 +346,7 @@ impl<'w> DynamicSceneBuilder<'w> { /// [`deny_resource`]: Self::deny_resource #[must_use] pub fn extract_resources(mut self) -> Self { + // Don't extract the DefaultQueryFilters resource let original_world_dqf_id = self .original_world .components() @@ -380,13 +376,11 @@ impl<'w> DynamicSceneBuilder<'w> { let resource = type_registration .data::()? - .reflect(self.original_world)?; + .reflect(self.original_world) + .ok()?; - let resource = type_registration - .data::() - .and_then(|fr| fr.from_reflect(resource.as_partial_reflect())) - .map(PartialReflect::into_partial_reflect) - .unwrap_or_else(|| resource.clone_value()); + let resource = + clone_reflect_value(resource.as_partial_reflect(), type_registration); self.extracted_resources.insert(component_id, resource); Some(()) diff --git a/crates/bevy_scene/src/lib.rs b/crates/bevy_scene/src/lib.rs index fe15866c14..a507a58aaf 100644 --- a/crates/bevy_scene/src/lib.rs +++ b/crates/bevy_scene/src/lib.rs @@ -15,6 +15,7 @@ extern crate alloc; mod components; mod dynamic_scene; mod dynamic_scene_builder; +mod reflect_utils; mod scene; mod scene_filter; mod scene_loader; @@ -26,7 +27,6 @@ pub mod serde; /// Rusty Object Notation, a crate used to serialize and deserialize bevy scenes. pub use bevy_asset::ron; -use bevy_ecs::schedule::IntoSystemConfigs; pub use components::*; pub use dynamic_scene::*; pub use dynamic_scene_builder::*; @@ -47,7 +47,9 @@ pub mod prelude { } use bevy_app::prelude::*; -use bevy_asset::AssetApp; + +#[cfg(feature = "serialize")] +use {bevy_asset::AssetApp, bevy_ecs::schedule::IntoScheduleConfigs}; /// Plugin that provides scene functionality to an [`App`]. #[derive(Default)] @@ -81,7 +83,7 @@ impl Plugin for ScenePlugin { if let Some(instance_ids) = scene_spawner.spawned_dynamic_scenes.get_mut(&id) { instance_ids.remove(&scene_instance); } - scene_spawner.despawn_instance(scene_instance); + scene_spawner.unregister_instance(scene_instance); } }); @@ -95,7 +97,7 @@ impl Plugin for ScenePlugin { let Some(mut scene_spawner) = world.get_resource_mut::() else { return; }; - scene_spawner.despawn_instance(scene_instance); + scene_spawner.unregister_instance(scene_instance); } }); } diff --git a/crates/bevy_scene/src/reflect_utils.rs b/crates/bevy_scene/src/reflect_utils.rs new file mode 100644 index 0000000000..bf69dd0352 --- /dev/null +++ b/crates/bevy_scene/src/reflect_utils.rs @@ -0,0 +1,25 @@ +use bevy_reflect::{PartialReflect, ReflectFromReflect, TypeRegistration}; + +/// Attempts to clone a [`PartialReflect`] value using various methods. +/// +/// This first attempts to clone via [`PartialReflect::reflect_clone`]. +/// then falls back to [`ReflectFromReflect::from_reflect`], +/// and finally [`PartialReflect::to_dynamic`] if the first two methods fail. +/// +/// This helps ensure that the original type and type data is retained, +/// and only returning a dynamic type if all other methods fail. 
+pub(super) fn clone_reflect_value( + value: &dyn PartialReflect, + type_registration: &TypeRegistration, +) -> Box { + value + .reflect_clone() + .map(PartialReflect::into_partial_reflect) + .unwrap_or_else(|_| { + type_registration + .data::() + .and_then(|fr| fr.from_reflect(value.as_partial_reflect())) + .map(PartialReflect::into_partial_reflect) + .unwrap_or_else(|| value.to_dynamic()) + }) +} diff --git a/crates/bevy_scene/src/scene.rs b/crates/bevy_scene/src/scene.rs index 9040b15e85..1d684c9dac 100644 --- a/crates/bevy_scene/src/scene.rs +++ b/crates/bevy_scene/src/scene.rs @@ -1,15 +1,17 @@ use core::any::TypeId; +use crate::reflect_utils::clone_reflect_value; use crate::{DynamicScene, SceneSpawnError}; use bevy_asset::Asset; use bevy_ecs::{ component::ComponentCloneBehavior, - entity::{hash_map::EntityHashMap, Entity, SceneEntityMapper}, + entity::{Entity, EntityHashMap, SceneEntityMapper}, entity_disabling::DefaultQueryFilters, reflect::{AppTypeRegistry, ReflectComponent, ReflectResource}, + relationship::RelationshipHookMode, world::World, }; -use bevy_reflect::{PartialReflect, TypePath}; +use bevy_reflect::TypePath; /// A composition of [`World`] objects. /// @@ -124,10 +126,8 @@ impl Scene { .get_info(component_id) .expect("component_ids in archetypes should have ComponentInfo"); - match component_info.clone_behavior() { - ComponentCloneBehavior::Ignore - | ComponentCloneBehavior::RelationshipTarget(_) => continue, - _ => {} + if *component_info.clone_behavior() == ComponentCloneBehavior::Ignore { + continue; } let registration = type_registry @@ -144,7 +144,9 @@ impl Scene { let Some(component) = reflect_component .reflect(self.world.entity(scene_entity.id())) - .map(PartialReflect::clone_value) + .map(|component| { + clone_reflect_value(component.as_partial_reflect(), registration) + }) else { continue; }; @@ -157,6 +159,7 @@ impl Scene { component.as_partial_reflect(), &type_registry, mapper, + RelationshipHookMode::Skip, ); }); } diff --git a/crates/bevy_scene/src/scene_filter.rs b/crates/bevy_scene/src/scene_filter.rs index 732a1e5f15..a3154c37e7 100644 --- a/crates/bevy_scene/src/scene_filter.rs +++ b/crates/bevy_scene/src/scene_filter.rs @@ -1,4 +1,4 @@ -use bevy_platform_support::collections::{hash_set::IntoIter, HashSet}; +use bevy_platform::collections::{hash_set::IntoIter, HashSet}; use core::any::{Any, TypeId}; /// A filter used to control which types can be added to a [`DynamicScene`]. diff --git a/crates/bevy_scene/src/scene_loader.rs b/crates/bevy_scene/src/scene_loader.rs index 481b7ebc04..d74dff84f5 100644 --- a/crates/bevy_scene/src/scene_loader.rs +++ b/crates/bevy_scene/src/scene_loader.rs @@ -1,21 +1,27 @@ -#[cfg(feature = "serialize")] -use crate::serde::SceneDeserializer; -use crate::{ron, DynamicScene}; -use bevy_asset::{io::Reader, AssetLoader, LoadContext}; +use crate::ron; use bevy_ecs::{ reflect::AppTypeRegistry, world::{FromWorld, World}, }; use bevy_reflect::TypeRegistryArc; -#[cfg(feature = "serialize")] -use serde::de::DeserializeSeed; use thiserror::Error; +#[cfg(feature = "serialize")] +use { + crate::{serde::SceneDeserializer, DynamicScene}, + bevy_asset::{io::Reader, AssetLoader, LoadContext}, + serde::de::DeserializeSeed, +}; + /// Asset loader for a Bevy dynamic scene (`.scn` / `.scn.ron`). /// /// The loader handles assets serialized with [`DynamicScene::serialize`]. 
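`clone_reflect_value` documents a fallback chain: try `reflect_clone`, then `from_reflect`, and only then produce a dynamic value. A plain-Rust sketch of that control flow, with closures standing in for the three reflection methods (not the real APIs):

```rust
fn clone_with_fallbacks(
    reflect_clone: impl Fn() -> Result<String, ()>,
    from_reflect: impl Fn() -> Option<String>,
    to_dynamic: impl Fn() -> String,
) -> String {
    reflect_clone()
        .unwrap_or_else(|_| from_reflect().unwrap_or_else(to_dynamic))
}

fn main() {
    // The first method fails, so the second one wins.
    let cloned = clone_with_fallbacks(
        || Err(()),
        || Some("from_reflect".to_string()),
        || "dynamic".to_string(),
    );
    assert_eq!(cloned, "from_reflect");
}
```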
#[derive(Debug)] pub struct SceneLoader { + #[cfg_attr( + not(feature = "serialize"), + expect(dead_code, reason = "only used with `serialize` feature") + )] type_registry: TypeRegistryArc, } diff --git a/crates/bevy_scene/src/scene_spawner.rs b/crates/bevy_scene/src/scene_spawner.rs index 3d4014f123..dce5ad971e 100644 --- a/crates/bevy_scene/src/scene_spawner.rs +++ b/crates/bevy_scene/src/scene_spawner.rs @@ -1,14 +1,14 @@ use crate::{DynamicScene, Scene}; use bevy_asset::{AssetEvent, AssetId, Assets, Handle}; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, Entity}, + entity::{Entity, EntityHashMap}, event::{Event, EventCursor, Events}, hierarchy::ChildOf, reflect::AppTypeRegistry, resource::Resource, world::{Mut, World}, }; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_reflect::Reflect; use thiserror::Error; use uuid::Uuid; @@ -26,7 +26,7 @@ use bevy_ecs::{ /// /// [`Trigger`]: bevy_ecs::observer::Trigger #[derive(Clone, Copy, Debug, Eq, PartialEq, Event, Reflect)] -#[reflect(Debug, PartialEq)] +#[reflect(Debug, PartialEq, Clone)] pub struct SceneInstanceReady { /// Instance which has been spawned. pub instance_id: InstanceId, @@ -41,7 +41,7 @@ pub struct InstanceInfo { /// Unique id identifying a scene instance. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Reflect)] -#[reflect(Debug, PartialEq, Hash)] +#[reflect(Debug, PartialEq, Hash, Clone)] pub struct InstanceId(Uuid); impl InstanceId { @@ -179,10 +179,19 @@ impl SceneSpawner { } /// Schedule the despawn of a scene instance, removing all its entities from the world. + /// + /// Note: this will despawn _all_ entities associated with this instance, including those + /// that have been removed from the scene hierarchy. To despawn _only_ entities still in the hierarchy, + /// despawn the relevant root entity directly. pub fn despawn_instance(&mut self, instance_id: InstanceId) { self.instances_to_despawn.push(instance_id); } + /// This will remove all records of this instance, without despawning any entities. + pub fn unregister_instance(&mut self, instance_id: InstanceId) { + self.spawned_instances.remove(&instance_id); + } + /// Immediately despawns all instances of a dynamic scene. pub fn despawn_sync( &mut self, @@ -322,10 +331,7 @@ impl SceneSpawner { Ok(_) => { self.spawned_instances .insert(instance_id, InstanceInfo { entity_map }); - let spawned = self - .spawned_dynamic_scenes - .entry(handle.id()) - .or_insert_with(HashSet::default); + let spawned = self.spawned_dynamic_scenes.entry(handle.id()).or_default(); spawned.insert(instance_id); // Scenes with parents need more setup before they are ready. 
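`SceneSpawner` now distinguishes between despawning an instance's entities and merely forgetting the instance, as the new `unregister_instance` docs describe. A usage sketch, assuming bevy_scene 0.16-dev re-exports `SceneSpawner` and `InstanceId` at the crate root (`TrackedInstance` is a hypothetical resource holding an id obtained at spawn time):

```rust
use bevy_ecs::prelude::*;
use bevy_scene::{InstanceId, SceneSpawner};

/// Hypothetical resource remembering a scene instance id.
#[derive(Resource)]
struct TrackedInstance(InstanceId);

/// Removes every entity that belonged to the instance.
fn drop_instance(mut spawner: ResMut<SceneSpawner>, tracked: Res<TrackedInstance>) {
    spawner.despawn_instance(tracked.0);
}

/// Forgets the instance's bookkeeping but leaves its entities alive.
fn detach_instance(mut spawner: ResMut<SceneSpawner>, tracked: Res<TrackedInstance>) {
    spawner.unregister_instance(tracked.0);
}
```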
@@ -579,7 +585,8 @@ mod tests { let (scene_entity, scene_component_a) = app .world_mut() .query::<(Entity, &ComponentA)>() - .single(app.world()); + .single(app.world()) + .unwrap(); assert_eq!(scene_component_a.x, 3.0); assert_eq!(scene_component_a.y, 4.0); assert_eq!( @@ -622,7 +629,10 @@ mod tests { // clone only existing entity let mut scene_spawner = SceneSpawner::default(); - let entity = world.query_filtered::>().single(&world); + let entity = world + .query_filtered::>() + .single(&world) + .unwrap(); let scene = DynamicSceneBuilder::from_world(&world) .extract_entity(entity) .build(); @@ -861,4 +871,58 @@ mod tests { app.update(); check(app.world_mut(), 0); } + + #[test] + fn scene_child_order_preserved_when_archetype_order_mismatched() { + let mut app = App::new(); + + app.add_plugins(ScheduleRunnerPlugin::default()) + .add_plugins(AssetPlugin::default()) + .add_plugins(ScenePlugin) + .register_type::() + .register_type::(); + app.update(); + + let mut scene_world = World::new(); + let root = scene_world.spawn_empty().id(); + let temporary_root = scene_world.spawn_empty().id(); + // Spawn entities with different parent first before parenting them to the actual root, allowing us + // to decouple child order from archetype-creation-order + let child1 = scene_world + .spawn((ChildOf(temporary_root), ComponentA { x: 1.0, y: 1.0 })) + .id(); + let child2 = scene_world + .spawn((ChildOf(temporary_root), ComponentA { x: 2.0, y: 2.0 })) + .id(); + // the "first" child is intentionally spawned with a different component to force it into a "newer" archetype, + // meaning it will be iterated later in the spawn code. + let child0 = scene_world + .spawn((ChildOf(temporary_root), ComponentF)) + .id(); + + scene_world + .entity_mut(root) + .add_children(&[child0, child1, child2]); + + let scene = Scene::new(scene_world); + let scene_handle = app.world_mut().resource_mut::>().add(scene); + + let spawned = app.world_mut().spawn(SceneRoot(scene_handle.clone())).id(); + + app.update(); + let world = app.world_mut(); + + let spawned_root = world.entity(spawned).get::().unwrap()[0]; + let children = world.entity(spawned_root).get::().unwrap(); + assert_eq!(children.len(), 3); + assert!(world.entity(children[0]).get::().is_some()); + assert_eq!( + world.entity(children[1]).get::().unwrap().x, + 1.0 + ); + assert_eq!( + world.entity(children[2]).get::().unwrap().x, + 2.0 + ); + } } diff --git a/crates/bevy_scene/src/serde.rs b/crates/bevy_scene/src/serde.rs index 8e26863e79..ead8933a49 100644 --- a/crates/bevy_scene/src/serde.rs +++ b/crates/bevy_scene/src/serde.rs @@ -2,7 +2,7 @@ use crate::{DynamicEntity, DynamicScene}; use bevy_ecs::entity::Entity; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use bevy_reflect::{ serde::{ ReflectDeserializer, TypeRegistrationDeserializer, TypedReflectDeserializer, @@ -515,14 +515,13 @@ mod tests { DynamicScene, DynamicSceneBuilder, }; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, Entity}, + entity::{Entity, EntityHashMap}, prelude::{Component, ReflectComponent, ReflectResource, Resource, World}, query::{With, Without}, reflect::AppTypeRegistry, world::FromWorld, }; use bevy_reflect::{Reflect, ReflectDeserialize, ReflectSerialize}; - use bincode::Options; use serde::{de::DeserializeSeed, Deserialize, Serialize}; use std::io::BufReader; @@ -763,12 +762,12 @@ mod tests { let bar_to_foo = dst_world .query_filtered::<&MyEntityRef, Without>() - .get_single(&dst_world) + .single(&dst_world) .cloned() .unwrap(); let foo = 
dst_world .query_filtered::>() - .get_single(&dst_world) + .single(&dst_world) .unwrap(); assert_eq!(foo, bar_to_foo.0); @@ -793,7 +792,7 @@ mod tests { deserialized_scene .write_to_world(&mut world, &mut EntityHashMap::default()) .unwrap(); - assert_eq!(&qux, world.query::<&Qux>().single(&world)); + assert_eq!(&qux, world.query::<&Qux>().single(&world).unwrap()); } #[test] @@ -894,8 +893,9 @@ mod tests { let scene = DynamicScene::from_world(&world); + let config = bincode::config::standard().with_fixed_int_encoding(); let scene_serializer = SceneSerializer::new(&scene, registry); - let serialized_scene = bincode::serialize(&scene_serializer).unwrap(); + let serialized_scene = bincode::serde::encode_to_vec(&scene_serializer, config).unwrap(); assert_eq!( vec![ @@ -913,10 +913,9 @@ mod tests { type_registry: registry, }; - let deserialized_scene = bincode::DefaultOptions::new() - .with_fixint_encoding() - .deserialize_seed(scene_deserializer, &serialized_scene) - .unwrap(); + let (deserialized_scene, _read_bytes) = + bincode::serde::seed_decode_from_slice(scene_deserializer, &serialized_scene, config) + .unwrap(); assert_eq!(1, deserialized_scene.entities.len()); assert_scene_eq(&scene, &deserialized_scene); diff --git a/crates/bevy_sprite/Cargo.toml b/crates/bevy_sprite/Cargo.toml index dc9c10154f..8fa5bae2cc 100644 --- a/crates/bevy_sprite/Cargo.toml +++ b/crates/bevy_sprite/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_sprite" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides sprite functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -23,15 +23,13 @@ bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } bevy_picking = { path = "../bevy_picking", version = "0.16.0-dev", optional = true } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_window = { path = "../bevy_window", version = "0.16.0-dev", optional = true } bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", ] } diff --git a/crates/bevy_sprite/src/lib.rs b/crates/bevy_sprite/src/lib.rs index 568701725d..37d4d2d6e4 100644 --- a/crates/bevy_sprite/src/lib.rs +++ b/crates/bevy_sprite/src/lib.rs @@ -21,6 +21,11 @@ mod texture_slice; /// /// This includes the most common types in this crate, re-exported for your convenience. pub mod prelude { + #[cfg(feature = "bevy_sprite_picking_backend")] + #[doc(hidden)] + pub use crate::picking_backend::{ + SpritePickingCamera, SpritePickingMode, SpritePickingPlugin, SpritePickingSettings, + }; #[doc(hidden)] pub use crate::{ sprite::{Sprite, SpriteImageMode}, @@ -52,28 +57,8 @@ use bevy_render::{ }; /// Adds support for 2D sprite rendering. -pub struct SpritePlugin { - /// Whether to add the sprite picking backend to the app. 
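The serde test migrates from bincode 1.x (`bincode::serialize`, `DefaultOptions::new().with_fixint_encoding()`) to the bincode 2.0 serde API with an explicit config. A minimal round-trip sketch, assuming bincode 2.0 with its `serde` feature and serde's derive feature:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Sample {
    x: f32,
    label: String,
}

fn main() {
    // `with_fixed_int_encoding` matches the old fixint behavior used by the
    // scene serialization test.
    let config = bincode::config::standard().with_fixed_int_encoding();
    let value = Sample { x: 1.0, label: "hello".to_string() };

    let bytes = bincode::serde::encode_to_vec(&value, config).unwrap();
    let (decoded, _bytes_read): (Sample, usize) =
        bincode::serde::decode_from_slice(&bytes, config).unwrap();

    assert_eq!(value, decoded);
}
```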
- #[cfg(feature = "bevy_sprite_picking_backend")] - pub add_picking: bool, -} - -#[expect( - clippy::allow_attributes, - reason = "clippy::derivable_impls is not always linted" -)] -#[allow( - clippy::derivable_impls, - reason = "Known false positive with clippy: " -)] -impl Default for SpritePlugin { - fn default() -> Self { - Self { - #[cfg(feature = "bevy_sprite_picking_backend")] - add_picking: true, - } - } -} +#[derive(Default)] +pub struct SpritePlugin; pub const SPRITE_SHADER_HANDLE: Handle = weak_handle!("ed996613-54c0-49bd-81be-1c2d1a0d03c2"); @@ -125,9 +110,7 @@ impl Plugin for SpritePlugin { ); #[cfg(feature = "bevy_sprite_picking_backend")] - if self.add_picking { - app.add_plugins(SpritePickingPlugin); - } + app.add_plugins(SpritePickingPlugin); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app @@ -135,6 +118,7 @@ impl Plugin for SpritePlugin { .init_resource::>() .init_resource::() .init_resource::() + .init_resource::() .init_resource::() .add_render_command::() .add_systems( @@ -171,7 +155,7 @@ impl Plugin for SpritePlugin { /// System calculating and inserting an [`Aabb`] component to entities with either: /// - a `Mesh2d` component, /// - a `Sprite` and `Handle` components, -/// and without a [`NoFrustumCulling`] component. +/// and without a [`NoFrustumCulling`] component. /// /// Used in system set [`VisibilitySystems::CalculateBounds`]. pub fn calculate_bounds_2d( @@ -348,7 +332,7 @@ mod test { .world_mut() .spawn(Sprite { rect: Some(Rect::new(0., 0., 0.5, 1.)), - anchor: Anchor::TopRight, + anchor: Anchor::TOP_RIGHT, image: image_handle, ..default() }) diff --git a/crates/bevy_sprite/src/mesh2d/color_material.rs b/crates/bevy_sprite/src/mesh2d/color_material.rs index 15c965b7a7..83b6930776 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.rs +++ b/crates/bevy_sprite/src/mesh2d/color_material.rs @@ -3,7 +3,7 @@ use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, weak_handle, Asset, AssetApp, Assets, Handle}; use bevy_color::{Alpha, Color, ColorToComponents, LinearRgba}; use bevy_image::Image; -use bevy_math::Vec4; +use bevy_math::{Affine2, Mat3, Vec4}; use bevy_reflect::prelude::*; use bevy_render::{render_asset::RenderAssets, render_resource::*, texture::GpuImage}; @@ -40,11 +40,12 @@ impl Plugin for ColorMaterialPlugin { /// A [2d material](Material2d) that renders [2d meshes](crate::Mesh2d) with a texture tinted by a uniform color #[derive(Asset, AsBindGroup, Reflect, Debug, Clone)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] #[uniform(0, ColorMaterialUniform)] pub struct ColorMaterial { pub color: Color, pub alpha_mode: AlphaMode2d, + pub uv_transform: Affine2, #[texture(1)] #[sampler(2)] pub texture: Option>, @@ -61,6 +62,7 @@ impl Default for ColorMaterial { fn default() -> Self { ColorMaterial { color: Color::WHITE, + uv_transform: Affine2::default(), texture: None, // TODO should probably default to AlphaMask once supported? 
alpha_mode: AlphaMode2d::Blend, @@ -117,6 +119,7 @@ impl ColorMaterialFlags { #[derive(Clone, Default, ShaderType)] pub struct ColorMaterialUniform { pub color: Vec4, + pub uv_transform: Mat3, pub flags: u32, pub alpha_cutoff: f32, } @@ -140,6 +143,7 @@ impl AsBindGroupShaderType for ColorMaterial { }; ColorMaterialUniform { color: LinearRgba::from(self.color).to_f32_array().into(), + uv_transform: self.uv_transform.into(), flags: flags.bits(), alpha_cutoff, } diff --git a/crates/bevy_sprite/src/mesh2d/color_material.wgsl b/crates/bevy_sprite/src/mesh2d/color_material.wgsl index a166ce4530..a2dbe4e055 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.wgsl +++ b/crates/bevy_sprite/src/mesh2d/color_material.wgsl @@ -9,6 +9,7 @@ struct ColorMaterial { color: vec4, + uv_transform: mat3x3, // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. flags: u32, alpha_cutoff: f32, @@ -34,8 +35,10 @@ fn fragment( output_color = output_color * mesh.color; #endif + let uv = (material.uv_transform * vec3(mesh.uv, 1.0)).xy; + if ((material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u) { - output_color = output_color * textureSample(texture, texture_sampler, mesh.uv); + output_color = output_color * textureSample(texture, texture_sampler, uv); } output_color = alpha_discard(material, output_color); diff --git a/crates/bevy_sprite/src/mesh2d/material.rs b/crates/bevy_sprite/src/mesh2d/material.rs index 7593b3d85e..e34595f138 100644 --- a/crates/bevy_sprite/src/mesh2d/material.rs +++ b/crates/bevy_sprite/src/mesh2d/material.rs @@ -13,16 +13,16 @@ use bevy_core_pipeline::{ }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::component::Tick; -use bevy_ecs::entity::EntityHash; use bevy_ecs::system::SystemChangeTick; use bevy_ecs::{ prelude::*, system::{lifetimeless::SRes, SystemParamItem}, }; use bevy_math::FloatOrd; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::{prelude::ReflectDefault, Reflect}; -use bevy_render::render_phase::DrawFunctionId; +use bevy_render::camera::extract_cameras; +use bevy_render::render_phase::{DrawFunctionId, InputUniformIndex}; use bevy_render::render_resource::CachedRenderPipelineId; use bevy_render::view::RenderVisibleEntities; use bevy_render::{ @@ -45,6 +45,7 @@ use bevy_render::{ view::{ExtractedView, ViewVisibility}, Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; +use bevy_utils::Parallel; use core::{hash::Hash, marker::PhantomData}; use derive_more::derive::From; use tracing::error; @@ -187,8 +188,8 @@ pub trait Material2d: AsBindGroup + Asset + Clone + Sized { /// ``` /// /// [`MeshMaterial2d`]: crate::MeshMaterial2d -#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, From)] +#[reflect(Component, Default, Clone)] pub struct MeshMaterial2d(pub Handle); impl Default for MeshMaterial2d { @@ -197,6 +198,14 @@ impl Default for MeshMaterial2d { } } +impl PartialEq for MeshMaterial2d { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Eq for MeshMaterial2d {} + impl From> for AssetId { fn from(material: MeshMaterial2d) -> Self { material.id() @@ -223,7 +232,7 @@ impl AsAssetId for MeshMaterial2d { /// This is very similar to [`AlphaMode`](bevy_render::alpha::AlphaMode) but this only applies to 2d meshes. /// We use a separate type because 2d doesn't support all the transparency modes that 3d does. 
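`ColorMaterial` gains a `uv_transform: Affine2` field that `color_material.wgsl` now applies to the mesh UVs before sampling. A construction sketch, assuming bevy_sprite/bevy_math 0.16-dev (`Affine2::from_scale` comes from the re-exported glam type):

```rust
use bevy_math::{Affine2, Vec2};
use bevy_sprite::ColorMaterial;

/// Builds a material whose UVs are scaled by 2 on both axes before the
/// texture lookup, mirroring `uv = uv_transform * vec3(mesh.uv, 1.0)`
/// in the updated shader.
fn scaled_uv_material() -> ColorMaterial {
    ColorMaterial {
        uv_transform: Affine2::from_scale(Vec2::splat(2.0)),
        ..Default::default()
    }
}
```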
#[derive(Debug, Default, Reflect, Copy, Clone, PartialEq)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum AlphaMode2d { /// Base color alpha values are overridden to be fully opaque (1.0). #[default] @@ -279,7 +288,7 @@ where .add_systems( ExtractSchedule, ( - extract_entities_needs_specialization::, + extract_entities_needs_specialization::.after(extract_cameras), extract_mesh_materials_2d::, ), ) @@ -548,10 +557,24 @@ pub const fn tonemapping_pipeline_key(tonemapping: Tonemapping) -> Mesh2dPipelin pub fn extract_entities_needs_specialization( entities_needing_specialization: Extract>>, mut entity_specialization_ticks: ResMut>, + mut removed_mesh_material_components: Extract>>, + mut specialized_material2d_pipeline_cache: ResMut>, + views: Query<&MainEntity, With>, ticks: SystemChangeTick, ) where M: Material2d, { + // Clean up any despawned entities, we do this first in case the removed material was re-added + // the same frame, thus will appear both in the removed components list and have been added to + // the `EntitiesNeedingSpecialization` collection by triggering the `Changed` filter + for entity in removed_mesh_material_components.read() { + entity_specialization_ticks.remove(&MainEntity::from(entity)); + for view in views { + if let Some(cache) = specialized_material2d_pipeline_cache.get_mut(view) { + cache.remove(&MainEntity::from(entity)); + } + } + } for entity in entities_needing_specialization.iter() { // Update the entity's specialization tick with this run's tick entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); @@ -590,11 +613,22 @@ impl Default for EntitySpecializationTicks { } } +/// Stores the [`SpecializedMaterial2dViewPipelineCache`] for each view. #[derive(Resource, Deref, DerefMut)] pub struct SpecializedMaterial2dPipelineCache { - // (view_entity, material_entity) -> (tick, pipeline_id) + // view_entity -> view pipeline cache #[deref] - map: HashMap<(MainEntity, MainEntity), (Tick, CachedRenderPipelineId), EntityHash>, + map: MainEntityHashMap>, + marker: PhantomData, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. 
+#[derive(Deref, DerefMut)] +pub struct SpecializedMaterial2dViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, marker: PhantomData, } @@ -607,24 +641,40 @@ impl Default for SpecializedMaterial2dPipelineCache { } } +impl Default for SpecializedMaterial2dViewPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, + } + } +} + pub fn check_entities_needing_specialization( needs_specialization: Query< Entity, - Or<( - Changed, - AssetChanged, - Changed>, - AssetChanged>, - )>, + ( + Or<( + Changed, + AssetChanged, + Changed>, + AssetChanged>, + )>, + With>, + ), >, + mut par_local: Local>>, mut entities_needing_specialization: ResMut>, ) where M: Material2d, { entities_needing_specialization.clear(); - for entity in &needs_specialization { - entities_needing_specialization.push(entity); - } + + needs_specialization + .par_iter() + .for_each(|entity| par_local.borrow_local_mut().push(entity)); + + par_local.drain_into(&mut entities_needing_specialization); } pub fn specialize_material2d_meshes( @@ -665,11 +715,21 @@ pub fn specialize_material2d_meshes( continue; }; + let view_tick = view_specialization_ticks.get(view_entity).unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(*view_entity) + .or_default(); + for (_, visible_entity) in visible_entities.iter::() { - let view_tick = view_specialization_ticks.get(view_entity).unwrap(); + let Some(material_asset_id) = render_material_instances.get(visible_entity) else { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.get_mut(visible_entity) else { + continue; + }; let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); - let last_specialized_tick = specialized_material_pipeline_cache - .get(&(*view_entity, *visible_entity)) + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) .map(|(tick, _)| *tick); let needs_specialization = last_specialized_tick.is_none_or(|tick| { view_tick.is_newer_than(tick, ticks.this_run()) @@ -678,13 +738,6 @@ pub fn specialize_material2d_meshes( if !needs_specialization { continue; } - - let Some(material_asset_id) = render_material_instances.get(visible_entity) else { - continue; - }; - let Some(mesh_instance) = render_mesh_instances.get_mut(visible_entity) else { - continue; - }; let Some(material_2d) = render_materials.get(*material_asset_id) else { continue; }; @@ -713,10 +766,8 @@ pub fn specialize_material2d_meshes( } }; - specialized_material_pipeline_cache.insert( - (*view_entity, *visible_entity), - (ticks.this_run(), pipeline_id), - ); + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); } } } @@ -741,6 +792,12 @@ pub fn queue_material2d_meshes( } for (view_entity, view, visible_entities) in &views { + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(view_entity) + else { + continue; + }; + let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) else { continue; @@ -754,8 +811,8 @@ pub fn queue_material2d_meshes( }; for (render_entity, visible_entity) in visible_entities.iter::() { - let Some((current_change_tick, pipeline_id)) = specialized_material_pipeline_cache - .get(&(*view_entity, *visible_entity)) + let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) 
.map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) else { continue; @@ -809,6 +866,7 @@ pub fn queue_material2d_meshes( }, bin_key, (*render_entity, *visible_entity), + InputUniformIndex::default(), binned_render_phase_type, current_change_tick, ); @@ -826,6 +884,7 @@ pub fn queue_material2d_meshes( }, bin_key, (*render_entity, *visible_entity), + InputUniformIndex::default(), binned_render_phase_type, current_change_tick, ); @@ -843,15 +902,12 @@ pub fn queue_material2d_meshes( // Batching is done in batch_and_prepare_render_phase batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, indexed: mesh.indexed(), }); } } } - - // Remove invalid entities from the bins. - opaque_phase.sweep_old_entities(); - alpha_mask_phase.sweep_old_entities(); } } diff --git a/crates/bevy_sprite/src/mesh2d/mesh.rs b/crates/bevy_sprite/src/mesh2d/mesh.rs index 95fa6abd1c..5822d47ed6 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh.rs +++ b/crates/bevy_sprite/src/mesh2d/mesh.rs @@ -19,11 +19,12 @@ use bevy_ecs::{ }; use bevy_image::{BevyDefault, Image, ImageSampler, TextureFormatPixelInfo}; use bevy_math::{Affine3, Vec4}; +use bevy_render::mesh::MeshTag; use bevy_render::prelude::Msaa; use bevy_render::RenderSet::PrepareAssets; use bevy_render::{ batching::{ - gpu_preprocessing::IndirectParametersMetadata, + gpu_preprocessing::IndirectParametersCpuMetadata, no_gpu_preprocessing::{ self, batch_and_prepare_binned_render_phase, batch_and_prepare_sorted_render_phase, write_batched_instance_buffer, BatchedInstanceBuffer, @@ -37,7 +38,8 @@ use bevy_render::{ }, render_asset::RenderAssets, render_phase::{ - PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, TrackedRenderPass, + sweep_old_entities, PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, + TrackedRenderPass, }, render_resource::{binding_types::uniform_buffer, *}, renderer::{RenderDevice, RenderQueue}, @@ -113,6 +115,11 @@ impl Plugin for Mesh2dRenderPlugin { .add_systems( Render, ( + ( + sweep_old_entities::, + sweep_old_entities::, + ) + .in_set(RenderSet::QueueSweep), batch_and_prepare_binned_render_phase:: .in_set(RenderSet::PrepareResources), batch_and_prepare_binned_render_phase:: @@ -230,10 +237,11 @@ pub struct Mesh2dUniform { pub local_from_world_transpose_a: [Vec4; 2], pub local_from_world_transpose_b: f32, pub flags: u32, + pub tag: u32, } -impl From<&Mesh2dTransforms> for Mesh2dUniform { - fn from(mesh_transforms: &Mesh2dTransforms) -> Self { +impl Mesh2dUniform { + fn from_components(mesh_transforms: &Mesh2dTransforms, tag: u32) -> Self { let (local_from_world_transpose_a, local_from_world_transpose_b) = mesh_transforms.world_from_local.inverse_transpose_3x3(); Self { @@ -241,6 +249,7 @@ impl From<&Mesh2dTransforms> for Mesh2dUniform { local_from_world_transpose_a, local_from_world_transpose_b, flags: mesh_transforms.flags, + tag, } } } @@ -259,6 +268,7 @@ pub struct RenderMesh2dInstance { pub mesh_asset_id: AssetId, pub material_bind_group_id: Material2dBindGroupId, pub automatic_batching: bool, + pub tag: u32, } #[derive(Default, Resource, Deref, DerefMut)] @@ -275,13 +285,14 @@ pub fn extract_mesh2d( &ViewVisibility, &GlobalTransform, &Mesh2d, + Option<&MeshTag>, Has, )>, >, ) { render_mesh_instances.clear(); - for (entity, view_visibility, transform, handle, no_automatic_batching) in &query { + for (entity, view_visibility, transform, handle, tag, no_automatic_batching) in &query { if !view_visibility.get() { continue; } @@ -295,6 +306,7 @@ pub 
fn extract_mesh2d( mesh_asset_id: handle.0.id(), material_bind_group_id: Material2dBindGroupId::default(), automatic_batching: !no_automatic_batching, + tag: tag.map_or(0, |i| **i), }, ); } @@ -359,7 +371,7 @@ impl FromWorld for Mesh2dPipeline { let format_size = image.texture_descriptor.format.pixel_size(); render_queue.write_texture( texture.as_image_copy(), - &image.data, + image.data.as_ref().expect("Image has no data"), TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(image.width() * format_size as u32), @@ -422,7 +434,7 @@ impl GetBatchData for Mesh2dPipeline { ) -> Option<(Self::BufferData, Option)> { let mesh_instance = mesh_instances.get(&main_entity)?; Some(( - (&mesh_instance.transforms).into(), + Mesh2dUniform::from_components(&mesh_instance.transforms, mesh_instance.tag), mesh_instance.automatic_batching.then_some(( mesh_instance.material_bind_group_id, mesh_instance.mesh_asset_id, @@ -439,7 +451,10 @@ impl GetFullBatchData for Mesh2dPipeline { main_entity: MainEntity, ) -> Option { let mesh_instance = mesh_instances.get(&main_entity)?; - Some((&mesh_instance.transforms).into()) + Some(Mesh2dUniform::from_components( + &mesh_instance.transforms, + mesh_instance.tag, + )) } fn get_index_and_compare_data( @@ -465,32 +480,31 @@ impl GetFullBatchData for Mesh2dPipeline { } fn write_batch_indirect_parameters_metadata( - input_index: u32, indexed: bool, base_output_index: u32, batch_set_index: Option, - indirect_parameters_buffer: &mut bevy_render::batching::gpu_preprocessing::IndirectParametersBuffers, + indirect_parameters_buffer: &mut bevy_render::batching::gpu_preprocessing::UntypedPhaseIndirectParametersBuffers, indirect_parameters_offset: u32, ) { // Note that `IndirectParameters` covers both of these structures, even // though they actually have distinct layouts. See the comment above that // type for more information. - let indirect_parameters = IndirectParametersMetadata { - mesh_index: input_index, + let indirect_parameters = IndirectParametersCpuMetadata { base_output_index, batch_set_index: match batch_set_index { None => !0, Some(batch_set_index) => u32::from(batch_set_index), }, - early_instance_count: 0, - late_instance_count: 0, }; if indexed { - indirect_parameters_buffer.set_indexed(indirect_parameters_offset, indirect_parameters); + indirect_parameters_buffer + .indexed + .set(indirect_parameters_offset, indirect_parameters); } else { indirect_parameters_buffer - .set_non_indexed(indirect_parameters_offset, indirect_parameters); + .non_indexed + .set(indirect_parameters_offset, indirect_parameters); } } } diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl index 0b99482211..dbd73fb171 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl @@ -43,3 +43,7 @@ fn mesh2d_tangent_local_to_world(world_from_local: mat4x4, vertex_tangent: vertex_tangent.w ); } + +fn get_tag(instance_index: u32) -> u32 { + return mesh[instance_index].tag; +} \ No newline at end of file diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl index d5038c818d..e29264e0bf 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl @@ -13,4 +13,5 @@ struct Mesh2d { local_from_world_transpose_b: f32, // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
flags: u32, + tag: u32, }; diff --git a/crates/bevy_sprite/src/mesh2d/wireframe2d.rs b/crates/bevy_sprite/src/mesh2d/wireframe2d.rs index 4547f83d9b..63e5280519 100644 --- a/crates/bevy_sprite/src/mesh2d/wireframe2d.rs +++ b/crates/bevy_sprite/src/mesh2d/wireframe2d.rs @@ -1,18 +1,61 @@ -use crate::{Material2d, Material2dKey, Material2dPlugin, Mesh2d}; -use bevy_app::{Plugin, Startup, Update}; -use bevy_asset::{load_internal_asset, weak_handle, Asset, AssetApp, Assets, Handle}; -use bevy_color::{Color, LinearRgba}; -use bevy_ecs::prelude::*; +use crate::{ + DrawMesh2d, Mesh2dPipeline, Mesh2dPipelineKey, RenderMesh2dInstances, SetMesh2dBindGroup, + SetMesh2dViewBindGroup, ViewKeyCache, ViewSpecializationTicks, +}; +use bevy_app::{App, Plugin, PostUpdate, Startup, Update}; +use bevy_asset::{ + load_internal_asset, prelude::AssetChanged, weak_handle, AsAssetId, Asset, AssetApp, + AssetEvents, AssetId, Assets, Handle, UntypedAssetId, +}; +use bevy_color::{Color, ColorToComponents}; +use bevy_core_pipeline::core_2d::{ + graph::{Core2d, Node2d}, + Camera2d, +}; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{ + component::Tick, + prelude::*, + query::QueryItem, + system::{lifetimeless::SRes, SystemChangeTick, SystemParamItem}, +}; +use bevy_platform::{ + collections::{HashMap, HashSet}, + hash::FixedHasher, +}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ - extract_resource::ExtractResource, mesh::MeshVertexBufferLayoutRef, prelude::*, + batching::gpu_preprocessing::GpuPreprocessingMode, + camera::ExtractedCamera, + extract_resource::ExtractResource, + mesh::{ + allocator::{MeshAllocator, SlabId}, + Mesh2d, MeshVertexBufferLayoutRef, RenderMesh, + }, + prelude::*, + render_asset::{ + prepare_assets, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets, + }, + render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, + render_phase::{ + AddRenderCommand, BinnedPhaseItem, BinnedRenderPhasePlugin, BinnedRenderPhaseType, + CachedRenderPipelinePhaseItem, DrawFunctionId, DrawFunctions, InputUniformIndex, PhaseItem, + PhaseItemBatchSetKey, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, + SetItemPipeline, TrackedRenderPass, ViewBinnedRenderPhases, + }, render_resource::*, + renderer::RenderContext, + sync_world::{MainEntity, MainEntityHashMap}, + view::{ + ExtractedView, RenderVisibleEntities, RetainedViewEntity, ViewDepthTexture, ViewTarget, + }, + Extract, Render, RenderApp, RenderDebugFlags, RenderSet, }; - -use super::MeshMaterial2d; +use core::{hash::Hash, ops::Range}; +use tracing::error; pub const WIREFRAME_2D_SHADER_HANDLE: Handle = - weak_handle!("3d8a3853-2927-4de2-9dc7-3971e7e40970"); + weak_handle!("2d8a3853-2927-4de2-9dc7-3971e7e40970"); /// A [`Plugin`] that draws wireframes for 2D meshes. /// @@ -24,9 +67,20 @@ pub const WIREFRAME_2D_SHADER_HANDLE: Handle = /// /// This is a native only feature. #[derive(Debug, Default)] -pub struct Wireframe2dPlugin; +pub struct Wireframe2dPlugin { + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} + +impl Wireframe2dPlugin { + /// Creates a new [`Wireframe2dPlugin`] with the given debug flags. 
+ pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { debug_flags } + } +} + impl Plugin for Wireframe2dPlugin { - fn build(&self, app: &mut bevy_app::App) { + fn build(&self, app: &mut App) { load_internal_asset!( app, WIREFRAME_2D_SHADER_HANDLE, @@ -34,25 +88,83 @@ impl Plugin for Wireframe2dPlugin { Shader::from_wgsl ); - app.register_type::() - .register_type::() - .register_type::() - .register_type::() - .init_resource::() - .add_plugins(Material2dPlugin::::default()) - .register_asset_reflect::() - .add_systems(Startup, setup_global_wireframe_material) - .add_systems( - Update, + app.add_plugins(( + BinnedRenderPhasePlugin::::new(self.debug_flags), + RenderAssetPlugin::::default(), + )) + .init_asset::() + .init_resource::>() + .register_type::() + .register_type::() + .register_type::() + .init_resource::() + .init_resource::() + .add_systems(Startup, setup_global_wireframe_material) + .add_systems( + Update, + ( + global_color_changed.run_if(resource_changed::), + wireframe_color_changed, + // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global + // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed. + (apply_wireframe_material, apply_global_wireframe_material).chain(), + ), + ) + .add_systems( + PostUpdate, + check_wireframe_entities_needing_specialization + .after(AssetEvents) + .run_if(resource_exists::), + ); + + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + render_app + .init_resource::() + .init_resource::() + .init_resource::>() + .add_render_command::() + .init_resource::() + .init_resource::>() + .add_render_graph_node::>(Core2d, Node2d::Wireframe) + .add_render_graph_edges( + Core2d, ( - global_color_changed.run_if(resource_changed::), - wireframe_color_changed, - // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global - // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed. - (apply_wireframe_material, apply_global_wireframe_material).chain(), + Node2d::EndMainPass, + Node2d::Wireframe, + Node2d::PostProcessing, + ), + ) + .add_systems( + ExtractSchedule, + ( + extract_wireframe_2d_camera, + extract_wireframe_entities_needing_specialization, + extract_wireframe_materials, + ), + ) + .add_systems( + Render, + ( + specialize_wireframes + .in_set(RenderSet::PrepareMeshes) + .after(prepare_assets::) + .after(prepare_assets::), + queue_wireframes + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::), ), ); } + + fn finish(&self, app: &mut App) { + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + render_app.init_resource::(); + } } /// Enables wireframe rendering for any entity it is attached to. @@ -63,6 +175,245 @@ impl Plugin for Wireframe2dPlugin { #[reflect(Component, Default, Debug, PartialEq)] pub struct Wireframe2d; +pub struct Wireframe2dPhaseItem { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: Wireframe2dBatchSetKey, + /// The key, which determines which can be batched. + pub bin_key: Wireframe2dBinKey, + /// An entity from which data will be fetched, including the mesh if + /// applicable. + pub representative_entity: (Entity, MainEntity), + /// The ranges of instances. 
+ pub batch_range: Range, + /// An extra index, which is either a dynamic offset or an index in the + /// indirect parameters list. + pub extra_index: PhaseItemExtraIndex, +} + +impl PhaseItem for Wireframe2dPhaseItem { + fn entity(&self) -> Entity { + self.representative_entity.0 + } + + fn main_entity(&self) -> MainEntity { + self.representative_entity.1 + } + + fn draw_function(&self) -> DrawFunctionId { + self.batch_set_key.draw_function + } + + fn batch_range(&self) -> &Range { + &self.batch_range + } + + fn batch_range_mut(&mut self) -> &mut Range { + &mut self.batch_range + } + + fn extra_index(&self) -> PhaseItemExtraIndex { + self.extra_index.clone() + } + + fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range, &mut PhaseItemExtraIndex) { + (&mut self.batch_range, &mut self.extra_index) + } +} + +impl CachedRenderPipelinePhaseItem for Wireframe2dPhaseItem { + fn cached_pipeline(&self) -> CachedRenderPipelineId { + self.batch_set_key.pipeline + } +} + +impl BinnedPhaseItem for Wireframe2dPhaseItem { + type BinKey = Wireframe2dBinKey; + type BatchSetKey = Wireframe2dBatchSetKey; + + fn new( + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, + representative_entity: (Entity, MainEntity), + batch_range: Range, + extra_index: PhaseItemExtraIndex, + ) -> Self { + Self { + batch_set_key, + bin_key, + representative_entity, + batch_range, + extra_index, + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe2dBatchSetKey { + /// The identifier of the render pipeline. + pub pipeline: CachedRenderPipelineId, + + /// The wireframe material asset ID. + pub asset_id: UntypedAssetId, + + /// The function used to draw. + pub draw_function: DrawFunctionId, + /// The ID of the slab of GPU memory that contains vertex data. + /// + /// For non-mesh items, you can fill this with 0 if your items can be + /// multi-drawn, or with a unique value if they can't. + pub vertex_slab: SlabId, + + /// The ID of the slab of GPU memory that contains index data, if present. + /// + /// For non-mesh items, you can safely fill this with `None`. + pub index_slab: Option, +} + +impl PhaseItemBatchSetKey for Wireframe2dBatchSetKey { + fn indexed(&self) -> bool { + self.index_slab.is_some() + } +} + +/// Data that must be identical in order to *batch* phase items together. +/// +/// Note that a *batch set* (if multi-draw is in use) contains multiple batches. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe2dBinKey { + /// The wireframe mesh asset ID. + pub asset_id: UntypedAssetId, +} + +pub struct SetWireframe2dPushConstants; + +impl RenderCommand

for SetWireframe2dPushConstants { + type Param = ( + SRes, + SRes>, + ); + type ViewQuery = (); + type ItemQuery = (); + + #[inline] + fn render<'w>( + item: &P, + _view: (), + _item_query: Option<()>, + (wireframe_instances, wireframe_assets): SystemParamItem<'w, '_, Self::Param>, + pass: &mut TrackedRenderPass<'w>, + ) -> RenderCommandResult { + let Some(wireframe_material) = wireframe_instances.get(&item.main_entity()) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + let Some(wireframe_material) = wireframe_assets.get(*wireframe_material) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + + pass.set_push_constants( + ShaderStages::FRAGMENT, + 0, + bytemuck::bytes_of(&wireframe_material.color), + ); + RenderCommandResult::Success + } +} + +pub type DrawWireframe2d = ( + SetItemPipeline, + SetMesh2dViewBindGroup<0>, + SetMesh2dBindGroup<1>, + SetWireframe2dPushConstants, + DrawMesh2d, +); + +#[derive(Resource, Clone)] +pub struct Wireframe2dPipeline { + mesh_pipeline: Mesh2dPipeline, + shader: Handle, +} + +impl FromWorld for Wireframe2dPipeline { + fn from_world(render_world: &mut World) -> Self { + Wireframe2dPipeline { + mesh_pipeline: render_world.resource::().clone(), + shader: WIREFRAME_2D_SHADER_HANDLE, + } + } +} + +impl SpecializedMeshPipeline for Wireframe2dPipeline { + type Key = Mesh2dPipelineKey; + + fn specialize( + &self, + key: Self::Key, + layout: &MeshVertexBufferLayoutRef, + ) -> Result { + let mut descriptor = self.mesh_pipeline.specialize(key, layout)?; + descriptor.label = Some("wireframe_2d_pipeline".into()); + descriptor.push_constant_ranges.push(PushConstantRange { + stages: ShaderStages::FRAGMENT, + range: 0..16, + }); + let fragment = descriptor.fragment.as_mut().unwrap(); + fragment.shader = self.shader.clone(); + descriptor.primitive.polygon_mode = PolygonMode::Line; + descriptor.depth_stencil.as_mut().unwrap().bias.slope_scale = 1.0; + Ok(descriptor) + } +} + +#[derive(Default)] +struct Wireframe2dNode; +impl ViewNode for Wireframe2dNode { + type ViewQuery = ( + &'static ExtractedCamera, + &'static ExtractedView, + &'static ViewTarget, + &'static ViewDepthTexture, + ); + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + (camera, view, target, depth): QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let Some(wireframe_phase) = + world.get_resource::>() + else { + return Ok(()); + }; + + let Some(wireframe_phase) = wireframe_phase.get(&view.retained_view_entity) else { + return Ok(()); + }; + + let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { + label: Some("wireframe_2d_pass"), + color_attachments: &[Some(target.get_color_attachment())], + depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)), + timestamp_writes: None, + occlusion_query_set: None, + }); + + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); + } + + if let Err(err) = wireframe_phase.render(&mut render_pass, world, graph.view_entity()) { + error!("Error encountered while rendering the stencil phase {err:?}"); + return Err(NodeRunError::DrawError(err)); + } + + Ok(()) + } +} + /// Sets the color of the [`Wireframe2d`] of the entity it is attached to. 
/// /// If this component is present but there's no [`Wireframe2d`] component, @@ -75,6 +426,11 @@ pub struct Wireframe2dColor { pub color: Color, } +#[derive(Component, Debug, Clone, Default)] +pub struct ExtractedWireframeColor { + pub color: [f32; 4], +} + /// Disables wireframe rendering for any entity it is attached to. /// It will ignore the [`Wireframe2dConfig`] global setting. /// @@ -86,7 +442,7 @@ pub struct NoWireframe2d; #[derive(Resource, Debug, Clone, Default, ExtractResource, Reflect)] #[reflect(Resource, Debug, Default)] pub struct Wireframe2dConfig { - /// Whether to show wireframes for all 2D meshes. + /// Whether to show wireframes for all meshes. /// Can be overridden for individual meshes by adding a [`Wireframe2d`] or [`NoWireframe2d`] component. pub global: bool, /// If [`Self::global`] is set, any [`Entity`] that does not have a [`Wireframe2d`] component attached to it will have @@ -95,21 +451,121 @@ pub struct Wireframe2dConfig { pub default_color: Color, } +#[derive(Asset, Reflect, Clone, Debug, Default)] +#[reflect(Clone, Default)] +pub struct Wireframe2dMaterial { + pub color: Color, +} + +pub struct RenderWireframeMaterial { + pub color: [f32; 4], +} + +#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)] +#[reflect(Component, Default, Clone, PartialEq)] +pub struct Mesh2dWireframe(pub Handle); + +impl AsAssetId for Mesh2dWireframe { + type Asset = Wireframe2dMaterial; + + fn as_asset_id(&self) -> AssetId { + self.0.id() + } +} + +impl RenderAsset for RenderWireframeMaterial { + type SourceAsset = Wireframe2dMaterial; + type Param = (); + + fn prepare_asset( + source_asset: Self::SourceAsset, + _asset_id: AssetId, + _param: &mut SystemParamItem, + ) -> Result> { + Ok(RenderWireframeMaterial { + color: source_asset.color.to_linear().to_f32_array(), + }) + } +} + +#[derive(Resource, Deref, DerefMut, Default)] +pub struct RenderWireframeInstances(MainEntityHashMap>); + +#[derive(Clone, Resource, Deref, DerefMut, Debug, Default)] +pub struct WireframeEntitiesNeedingSpecialization { + #[deref] + pub entities: Vec, +} + +#[derive(Resource, Deref, DerefMut, Clone, Debug, Default)] +pub struct WireframeEntitySpecializationTicks { + pub entities: MainEntityHashMap, +} + +/// Stores the [`SpecializedWireframeViewPipelineCache`] for each view. +#[derive(Resource, Deref, DerefMut, Default)] +pub struct SpecializedWireframePipelineCache { + // view entity -> view pipeline cache + #[deref] + map: HashMap, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. 
+#[derive(Deref, DerefMut, Default)] +pub struct SpecializedWireframeViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, +} + #[derive(Resource)] -struct GlobalWireframe2dMaterial { +struct GlobalWireframeMaterial { // This handle will be reused when the global config is enabled handle: Handle, } +pub fn extract_wireframe_materials( + mut material_instances: ResMut, + changed_meshes_query: Extract< + Query< + (Entity, &ViewVisibility, &Mesh2dWireframe), + Or<(Changed, Changed)>, + >, + >, + mut removed_visibilities_query: Extract>, + mut removed_materials_query: Extract>, +) { + for (entity, view_visibility, material) in &changed_meshes_query { + if view_visibility.get() { + material_instances.insert(entity.into(), material.id()); + } else { + material_instances.remove(&MainEntity::from(entity)); + } + } + + for entity in removed_visibilities_query + .read() + .chain(removed_materials_query.read()) + { + // Only queue a mesh for removal if we didn't pick it up above. + // It's possible that a necessary component was removed and re-added in + // the same frame. + if !changed_meshes_query.contains(entity) { + material_instances.remove(&MainEntity::from(entity)); + } + } +} + fn setup_global_wireframe_material( mut commands: Commands, mut materials: ResMut>, config: Res, ) { // Create the handle used for the global material - commands.insert_resource(GlobalWireframe2dMaterial { + commands.insert_resource(GlobalWireframeMaterial { handle: materials.add(Wireframe2dMaterial { - color: config.default_color.into(), + color: config.default_color, }), }); } @@ -118,10 +574,10 @@ fn setup_global_wireframe_material( fn global_color_changed( config: Res, mut materials: ResMut>, - global_material: Res, + global_material: Res, ) { if let Some(global_material) = materials.get_mut(&global_material.handle) { - global_material.color = config.default_color.into(); + global_material.color = config.default_color; } } @@ -129,13 +585,13 @@ fn global_color_changed( fn wireframe_color_changed( mut materials: ResMut>, mut colors_changed: Query< - (&mut MeshMaterial2d, &Wireframe2dColor), + (&mut Mesh2dWireframe, &Wireframe2dColor), (With, Changed), >, ) { for (mut handle, wireframe_color) in &mut colors_changed { handle.0 = materials.add(Wireframe2dMaterial { - color: wireframe_color.color.into(), + color: wireframe_color.color, }); } } @@ -147,99 +603,277 @@ fn apply_wireframe_material( mut materials: ResMut>, wireframes: Query< (Entity, Option<&Wireframe2dColor>), - ( - With, - Without>, - ), - >, - no_wireframes: Query< - Entity, - ( - With, - With>, - ), + (With, Without), >, + no_wireframes: Query, With)>, mut removed_wireframes: RemovedComponents, - global_material: Res, + global_material: Res, ) { for e in removed_wireframes.read().chain(no_wireframes.iter()) { - if let Some(mut commands) = commands.get_entity(e) { - commands.remove::>(); + if let Ok(mut commands) = commands.get_entity(e) { + commands.remove::(); } } - let mut wireframes_to_spawn = vec![]; - for (e, wireframe_color) in &wireframes { - let material = if let Some(wireframe_color) = wireframe_color { - materials.add(Wireframe2dMaterial { - color: wireframe_color.color.into(), - }) - } else { - // If there's no color specified we can use the global material since it's already set to use the default_color - global_material.handle.clone() - }; - wireframes_to_spawn.push((e, MeshMaterial2d(material))); + let mut material_to_spawn = vec![]; + for (e, maybe_color) in 
&wireframes { + let material = get_wireframe_material(maybe_color, &mut materials, &global_material); + material_to_spawn.push((e, Mesh2dWireframe(material))); } - commands.insert_or_spawn_batch(wireframes_to_spawn); + commands.try_insert_batch(material_to_spawn); } -type Wireframe2dFilter = (With, Without, Without); +type WireframeFilter = (With, Without, Without); /// Applies or removes a wireframe material on any mesh without a [`Wireframe2d`] or [`NoWireframe2d`] component. fn apply_global_wireframe_material( mut commands: Commands, config: Res, meshes_without_material: Query< - Entity, - ( - Wireframe2dFilter, - Without>, - ), + (Entity, Option<&Wireframe2dColor>), + (WireframeFilter, Without), >, - meshes_with_global_material: Query< - Entity, - (Wireframe2dFilter, With>), - >, - global_material: Res, + meshes_with_global_material: Query)>, + global_material: Res, + mut materials: ResMut>, ) { if config.global { let mut material_to_spawn = vec![]; - for e in &meshes_without_material { + for (e, maybe_color) in &meshes_without_material { + let material = get_wireframe_material(maybe_color, &mut materials, &global_material); // We only add the material handle but not the Wireframe component // This makes it easy to detect which mesh is using the global material and which ones are user specified - material_to_spawn.push((e, MeshMaterial2d(global_material.handle.clone()))); + material_to_spawn.push((e, Mesh2dWireframe(material))); } - commands.insert_or_spawn_batch(material_to_spawn); + commands.try_insert_batch(material_to_spawn); } else { for e in &meshes_with_global_material { - commands - .entity(e) - .remove::>(); + commands.entity(e).remove::(); } } } -#[derive(Default, AsBindGroup, Debug, Clone, Asset, Reflect)] -pub struct Wireframe2dMaterial { - #[uniform(0)] - pub color: LinearRgba, -} - -impl Material2d for Wireframe2dMaterial { - fn fragment_shader() -> ShaderRef { - WIREFRAME_2D_SHADER_HANDLE.into() - } - - fn depth_bias(&self) -> f32 { - 1.0 - } - - fn specialize( - descriptor: &mut RenderPipelineDescriptor, - _layout: &MeshVertexBufferLayoutRef, - _key: Material2dKey, - ) -> Result<(), SpecializedMeshPipelineError> { - descriptor.primitive.polygon_mode = PolygonMode::Line; - Ok(()) +/// Gets a handle to a wireframe material with a fallback on the default material +fn get_wireframe_material( + maybe_color: Option<&Wireframe2dColor>, + wireframe_materials: &mut Assets, + global_material: &GlobalWireframeMaterial, +) -> Handle { + if let Some(wireframe_color) = maybe_color { + wireframe_materials.add(Wireframe2dMaterial { + color: wireframe_color.color, + }) + } else { + // If there's no color specified we can use the global material since it's already set to use the default_color + global_material.handle.clone() + } +} + +fn extract_wireframe_2d_camera( + mut wireframe_2d_phases: ResMut>, + cameras: Extract>>, + mut live_entities: Local>, +) { + live_entities.clear(); + for (main_entity, camera) in &cameras { + if !camera.is_active { + continue; + } + let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0); + wireframe_2d_phases.prepare_for_new_frame(retained_view_entity, GpuPreprocessingMode::None); + live_entities.insert(retained_view_entity); + } + + // Clear out all dead views. 
+ wireframe_2d_phases.retain(|camera_entity, _| live_entities.contains(camera_entity)); +} + +pub fn extract_wireframe_entities_needing_specialization( + entities_needing_specialization: Extract>, + mut entity_specialization_ticks: ResMut, + views: Query<&ExtractedView>, + mut specialized_wireframe_pipeline_cache: ResMut, + mut removed_meshes_query: Extract>, + ticks: SystemChangeTick, +) { + for entity in entities_needing_specialization.iter() { + // Update the entity's specialization tick with this run's tick + entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); + } + + for entity in removed_meshes_query.read() { + for view in &views { + if let Some(specialized_wireframe_pipeline_cache) = + specialized_wireframe_pipeline_cache.get_mut(&view.retained_view_entity) + { + specialized_wireframe_pipeline_cache.remove(&MainEntity::from(entity)); + } + } + } +} + +pub fn check_wireframe_entities_needing_specialization( + needs_specialization: Query< + Entity, + Or<( + Changed, + AssetChanged, + Changed, + AssetChanged, + )>, + >, + mut entities_needing_specialization: ResMut, +) { + entities_needing_specialization.clear(); + for entity in &needs_specialization { + entities_needing_specialization.push(entity); + } +} + +pub fn specialize_wireframes( + render_meshes: Res>, + render_mesh_instances: Res, + render_wireframe_instances: Res, + wireframe_phases: Res>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, + view_key_cache: Res, + entity_specialization_ticks: Res, + view_specialization_ticks: Res, + mut specialized_material_pipeline_cache: ResMut, + mut pipelines: ResMut>, + pipeline: Res, + pipeline_cache: Res, + ticks: SystemChangeTick, +) { + // Record the retained IDs of all views so that we can expire old + // pipeline IDs. 
+ let mut all_views: HashSet = HashSet::default(); + + for (view, visible_entities) in &views { + all_views.insert(view.retained_view_entity); + + if !wireframe_phases.contains_key(&view.retained_view_entity) { + continue; + } + + let Some(view_key) = view_key_cache.get(&view.retained_view_entity.main_entity) else { + continue; + }; + + let view_tick = view_specialization_ticks + .get(&view.retained_view_entity.main_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(view.retained_view_entity) + .or_default(); + + for (_, visible_entity) in visible_entities.iter::() { + if !render_wireframe_instances.contains_key(visible_entity) { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.get(visible_entity) else { + continue; + }; + let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } + let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { + continue; + }; + + let mut mesh_key = *view_key; + mesh_key |= Mesh2dPipelineKey::from_primitive_topology(mesh.primitive_topology()); + + let pipeline_id = + pipelines.specialize(&pipeline_cache, &pipeline, mesh_key, &mesh.layout); + let pipeline_id = match pipeline_id { + Ok(id) => id, + Err(err) => { + error!("{}", err); + continue; + } + }; + + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); + } + } + + // Delete specialized pipelines belonging to views that have expired. + specialized_material_pipeline_cache + .retain(|retained_view_entity, _| all_views.contains(retained_view_entity)); +} + +fn queue_wireframes( + custom_draw_functions: Res>, + render_mesh_instances: Res, + mesh_allocator: Res, + specialized_wireframe_pipeline_cache: Res, + render_wireframe_instances: Res, + mut wireframe_2d_phases: ResMut>, + mut views: Query<(&ExtractedView, &RenderVisibleEntities)>, +) { + for (view, visible_entities) in &mut views { + let Some(wireframe_phase) = wireframe_2d_phases.get_mut(&view.retained_view_entity) else { + continue; + }; + let draw_wireframe = custom_draw_functions.read().id::(); + + let Some(view_specialized_material_pipeline_cache) = + specialized_wireframe_pipeline_cache.get(&view.retained_view_entity) + else { + continue; + }; + + for (render_entity, visible_entity) in visible_entities.iter::() { + let Some(wireframe_instance) = render_wireframe_instances.get(visible_entity) else { + continue; + }; + let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. 
+ if wireframe_phase.validate_cached_entity(*visible_entity, current_change_tick) { + continue; + } + let Some(mesh_instance) = render_mesh_instances.get(visible_entity) else { + continue; + }; + let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + let bin_key = Wireframe2dBinKey { + asset_id: mesh_instance.mesh_asset_id.untyped(), + }; + let batch_set_key = Wireframe2dBatchSetKey { + pipeline: pipeline_id, + asset_id: wireframe_instance.untyped(), + draw_function: draw_wireframe, + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; + wireframe_phase.add( + batch_set_key, + bin_key, + (*render_entity, *visible_entity), + InputUniformIndex::default(), + if mesh_instance.automatic_batching { + BinnedRenderPhaseType::BatchableMesh + } else { + BinnedRenderPhaseType::UnbatchableMesh + }, + current_change_tick, + ); + } } } diff --git a/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl b/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl index fac02d6456..c7bb3aa791 100644 --- a/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl +++ b/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl @@ -1,11 +1,12 @@ #import bevy_sprite::mesh2d_vertex_output::VertexOutput -struct WireframeMaterial { - color: vec4, -}; +struct PushConstants { + color: vec4 +} + +var push_constants: PushConstants; -@group(2) @binding(0) var material: WireframeMaterial; @fragment fn fragment(in: VertexOutput) -> @location(0) vec4 { - return material.color; + return push_constants.color; } diff --git a/crates/bevy_sprite/src/picking_backend.rs b/crates/bevy_sprite/src/picking_backend.rs index 5517629d32..a029838147 100644 --- a/crates/bevy_sprite/src/picking_backend.rs +++ b/crates/bevy_sprite/src/picking_backend.rs @@ -1,6 +1,11 @@ //! A [`bevy_picking`] backend for sprites. Works for simple sprites and sprite atlases. Works for //! sprites with arbitrary transforms. Picking is done based on sprite bounds, not visible pixels. //! This means a partially transparent sprite is pickable even in its transparent areas. +//! +//! ## Implementation Notes +//! +//! - The `position` reported in `HitData` in in world space, and the `normal` is a normalized +//! vector provided by the target's `GlobalTransform::back()`. use crate::Sprite; use bevy_app::prelude::*; @@ -15,14 +20,17 @@ use bevy_render::prelude::*; use bevy_transform::prelude::*; use bevy_window::PrimaryWindow; -/// A component that marks cameras that should be used in the [`SpritePickingPlugin`]. +/// An optional component that marks cameras that should be used in the [`SpritePickingPlugin`]. +/// +/// Only needed if [`SpritePickingSettings::require_markers`] is set to `true`, and ignored +/// otherwise. #[derive(Debug, Clone, Default, Component, Reflect)] -#[reflect(Debug, Default, Component)] +#[reflect(Debug, Default, Component, Clone)] pub struct SpritePickingCamera; /// How should the [`SpritePickingPlugin`] handle picking and how should it handle transparent pixels #[derive(Debug, Clone, Copy, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Clone)] pub enum SpritePickingMode { /// Even if a sprite is picked on a transparent pixel, it should still count within the backend. /// Only consider the rect of a given sprite. @@ -37,7 +45,7 @@ pub enum SpritePickingMode { #[reflect(Resource, Default)] pub struct SpritePickingSettings { /// When set to `true` sprite picking will only consider cameras marked with - /// [`SpritePickingCamera`] and entities marked with [`Pickable`]. `false` by default. + /// [`SpritePickingCamera`]. 
/// /// This setting is provided to give you fine-grained control over which cameras and entities /// should be used by the sprite picking backend at runtime. @@ -57,17 +65,16 @@ impl Default for SpritePickingSettings { } } +/// Enables the sprite picking backend, allowing you to click on, hover over and drag sprites. #[derive(Clone)] pub struct SpritePickingPlugin; impl Plugin for SpritePickingPlugin { fn build(&self, app: &mut App) { app.init_resource::() - .register_type::<( - SpritePickingCamera, - SpritePickingMode, - SpritePickingSettings, - )>() + .register_type::() + .register_type::() + .register_type::() .add_systems(PreUpdate, sprite_picking.in_set(PickSet::Backend)); } } @@ -89,7 +96,7 @@ fn sprite_picking( Entity, &Sprite, &GlobalTransform, - Option<&Pickable>, + &Pickable, &ViewVisibility, )>, mut output: EventWriter, @@ -97,8 +104,7 @@ fn sprite_picking( let mut sorted_sprites: Vec<_> = sprite_query .iter() .filter_map(|(entity, sprite, transform, pickable, vis)| { - let marker_requirement = !settings.require_markers || pickable.is_some(); - if !transform.affine().is_nan() && vis.get() && marker_requirement { + if !transform.affine().is_nan() && vis.get() { Some((entity, sprite, transform, pickable)) } else { None @@ -111,7 +117,7 @@ fn sprite_picking( -transform.translation().z }); - let primary_window = primary_window.get_single().ok(); + let primary_window = primary_window.single().ok(); for (pointer, location) in pointers.iter().filter_map(|(pointer, pointer_location)| { pointer_location.location().map(|loc| (pointer, loc)) @@ -214,8 +220,7 @@ fn sprite_picking( } }; - blocked = cursor_in_valid_pixels_of_sprite - && pickable.is_none_or(|p| p.should_block_lower); + blocked = cursor_in_valid_pixels_of_sprite && pickable.should_block_lower; cursor_in_valid_pixels_of_sprite.then(|| { let hit_pos_world = @@ -241,6 +246,6 @@ fn sprite_picking( .collect(); let order = camera.order as f32; - output.send(PointerHits::new(*pointer, picks, order)); + output.write(PointerHits::new(*pointer, picks, order)); } } diff --git a/crates/bevy_sprite/src/render/mod.rs b/crates/bevy_sprite/src/render/mod.rs index 41dbe42161..de57f43536 100644 --- a/crates/bevy_sprite/src/render/mod.rs +++ b/crates/bevy_sprite/src/render/mod.rs @@ -18,8 +18,7 @@ use bevy_ecs::{ }; use bevy_image::{BevyDefault, Image, ImageSampler, TextureAtlasLayout, TextureFormatPixelInfo}; use bevy_math::{Affine3A, FloatOrd, Quat, Rect, Vec2, Vec4}; -use bevy_platform_support::collections::HashMap; -use bevy_render::sync_world::MainEntity; +use bevy_platform::collections::HashMap; use bevy_render::view::{RenderVisibleEntities, RetainedViewEntity}; use bevy_render::{ render_asset::RenderAssets, @@ -32,7 +31,7 @@ use bevy_render::{ *, }, renderer::{RenderDevice, RenderQueue}, - sync_world::{RenderEntity, TemporaryRenderEntity}, + sync_world::RenderEntity, texture::{DefaultImageSampler, FallbackImage, GpuImage}, view::{ ExtractedView, Msaa, ViewTarget, ViewUniform, ViewUniformOffset, ViewUniforms, @@ -102,7 +101,7 @@ impl FromWorld for SpritePipeline { let format_size = image.texture_descriptor.format.pixel_size(); render_queue.write_texture( texture.as_image_copy(), - &image.data, + image.data.as_ref().expect("Image has no data"), TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(image.width() * format_size as u32), @@ -324,28 +323,47 @@ impl SpecializedRenderPipeline for SpritePipeline { } } +pub struct ExtractedSlice { + pub offset: Vec2, + pub rect: Rect, + pub size: Vec2, +} + pub struct ExtractedSprite { + pub 
main_entity: Entity, + pub render_entity: Entity, pub transform: GlobalTransform, pub color: LinearRgba, - /// Select an area of the texture - pub rect: Option, /// Change the on-screen size of the sprite - pub custom_size: Option, /// Asset ID of the [`Image`] of this sprite /// PERF: storing an `AssetId` instead of `Handle` enables some optimizations (`ExtractedSprite` becomes `Copy` and doesn't need to be dropped) pub image_handle_id: AssetId, pub flip_x: bool, pub flip_y: bool, - pub anchor: Vec2, - /// For cases where additional [`ExtractedSprites`] are created during extraction, this stores the - /// entity that caused that creation for use in determining visibility. - pub original_entity: Option, - pub scaling_mode: Option, + pub kind: ExtractedSpriteKind, +} + +pub enum ExtractedSpriteKind { + /// A single sprite with custom sizing and scaling options + Single { + anchor: Vec2, + rect: Option, + scaling_mode: Option, + custom_size: Option, + }, + /// Indexes into the list of [`ExtractedSlice`]s stored in the [`ExtractedSlices`] resource + /// Used for elements composed from multiple sprites such as text or nine-patched borders + Slices { indices: Range }, } #[derive(Resource, Default)] pub struct ExtractedSprites { - pub sprites: HashMap<(Entity, MainEntity), ExtractedSprite>, + pub sprites: Vec, +} + +#[derive(Resource, Default)] +pub struct ExtractedSlices { + pub slices: Vec, } #[derive(Resource, Default)] @@ -366,8 +384,8 @@ pub fn extract_sprite_events( } pub fn extract_sprites( - mut commands: Commands, mut extracted_sprites: ResMut, + mut extracted_slices: ResMut, texture_atlases: Extract>>, sprite_query: Extract< Query<( @@ -381,26 +399,32 @@ pub fn extract_sprites( >, ) { extracted_sprites.sprites.clear(); - for (original_entity, entity, view_visibility, sprite, transform, slices) in sprite_query.iter() + extracted_slices.slices.clear(); + for (main_entity, render_entity, view_visibility, sprite, transform, slices) in + sprite_query.iter() { if !view_visibility.get() { continue; } if let Some(slices) = slices { - extracted_sprites.sprites.extend( - slices - .extract_sprites(transform, original_entity, sprite) - .map(|e| { - ( - ( - commands.spawn(TemporaryRenderEntity).id(), - original_entity.into(), - ), - e, - ) - }), - ); + let start = extracted_slices.slices.len(); + extracted_slices + .slices + .extend(slices.extract_slices(sprite)); + let end = extracted_slices.slices.len(); + extracted_sprites.sprites.push(ExtractedSprite { + main_entity, + render_entity, + color: sprite.color.into(), + transform: *transform, + flip_x: sprite.flip_x, + flip_y: sprite.flip_y, + image_handle_id: sprite.image.id(), + kind: ExtractedSpriteKind::Slices { + indices: start..end, + }, + }); } else { let atlas_rect = sprite .texture_atlas @@ -413,28 +437,27 @@ pub fn extract_sprites( (Some(atlas_rect), Some(mut sprite_rect)) => { sprite_rect.min += atlas_rect.min; sprite_rect.max += atlas_rect.min; - Some(sprite_rect) } }; // PERF: we don't check in this function that the `Image` asset is ready, since it should be in most cases and hashing the handle is expensive - extracted_sprites.sprites.insert( - (entity, original_entity.into()), - ExtractedSprite { - color: sprite.color.into(), - transform: *transform, + extracted_sprites.sprites.push(ExtractedSprite { + main_entity, + render_entity, + color: sprite.color.into(), + transform: *transform, + flip_x: sprite.flip_x, + flip_y: sprite.flip_y, + image_handle_id: sprite.image.id(), + kind: ExtractedSpriteKind::Single { + anchor: 
sprite.anchor.as_vec(), rect, + scaling_mode: sprite.image_mode.scale(), // Pass the custom size custom_size: sprite.custom_size, - flip_x: sprite.flip_x, - flip_y: sprite.flip_y, - image_handle_id: sprite.image.id(), - anchor: sprite.anchor.as_vec(), - original_entity: Some(original_entity), - scaling_mode: sprite.image_mode.scale(), }, - ); + }); } } } @@ -561,10 +584,10 @@ pub fn queue_sprites( .items .reserve(extracted_sprites.sprites.len()); - for ((entity, main_entity), extracted_sprite) in extracted_sprites.sprites.iter() { - let index = extracted_sprite.original_entity.unwrap_or(*entity).index(); + for (index, extracted_sprite) in extracted_sprites.sprites.iter().enumerate() { + let view_index = extracted_sprite.main_entity.index(); - if !view_entities.contains(index as usize) { + if !view_entities.contains(view_index as usize) { continue; } @@ -575,11 +598,15 @@ pub fn queue_sprites( transparent_phase.add(Transparent2d { draw_function: draw_sprite_function, pipeline, - entity: (*entity, *main_entity), + entity: ( + extracted_sprite.render_entity, + extracted_sprite.main_entity.into(), + ), sort_key, // `batch_range` is calculated in `prepare_sprite_image_bind_groups` batch_range: 0..0, extra_index: PhaseItemExtraIndex::None, + extracted_index: index, indexed: true, }); } @@ -627,6 +654,7 @@ pub fn prepare_sprite_image_bind_groups( mut image_bind_groups: ResMut, gpu_images: Res>, extracted_sprites: Res, + extracted_slices: Res, mut phases: ResMut>, events: Res, mut batches: ResMut, @@ -664,7 +692,12 @@ pub fn prepare_sprite_image_bind_groups( // Compatible items share the same entity. for item_index in 0..transparent_phase.items.len() { let item = &transparent_phase.items[item_index]; - let Some(extracted_sprite) = extracted_sprites.sprites.get(&item.entity) else { + + let Some(extracted_sprite) = extracted_sprites + .sprites + .get(item.extracted_index) + .filter(|extracted_sprite| extracted_sprite.render_entity == item.entity()) + else { // If there is a phase item that is not a sprite, then we must start a new // batch to draw the other phase item(s) and to respect draw order. This can be // done by invalidating the batch_image_handle @@ -701,112 +734,161 @@ pub fn prepare_sprite_image_bind_groups( }, )); } - - // By default, the size of the quad is the size of the texture - let mut quad_size = batch_image_size; - - // Texture size is the size of the image - let mut texture_size = batch_image_size; - - // If a rect is specified, adjust UVs and the size of the quad - let mut uv_offset_scale = if let Some(rect) = extracted_sprite.rect { - let rect_size = rect.size(); - quad_size = rect_size; - // Update texture size to the rect size - // It will help scale properly only portion of the image - texture_size = rect_size; - Vec4::new( - rect.min.x / batch_image_size.x, - rect.max.y / batch_image_size.y, - rect_size.x / batch_image_size.x, - -rect_size.y / batch_image_size.y, - ) - } else { - Vec4::new(0.0, 1.0, 1.0, -1.0) - }; - - // Override the size if a custom one is specified - if let Some(custom_size) = extracted_sprite.custom_size { - quad_size = custom_size; - } - - // Used for translation of the quad if `TextureScale::Fit...` is specified. - let mut quad_translation = Vec2::ZERO; - - // Scales the texture based on the `texture_scale` field. 
- if let Some(scaling_mode) = extracted_sprite.scaling_mode { - apply_scaling( + match extracted_sprite.kind { + ExtractedSpriteKind::Single { + anchor, + rect, scaling_mode, - texture_size, - &mut quad_size, - &mut quad_translation, - &mut uv_offset_scale, - ); + custom_size, + } => { + // By default, the size of the quad is the size of the texture + let mut quad_size = batch_image_size; + let mut texture_size = batch_image_size; + + // Calculate vertex data for this item + // If a rect is specified, adjust UVs and the size of the quad + let mut uv_offset_scale = if let Some(rect) = rect { + let rect_size = rect.size(); + quad_size = rect_size; + // Update texture size to the rect size + // It will help scale properly only portion of the image + texture_size = rect_size; + Vec4::new( + rect.min.x / batch_image_size.x, + rect.max.y / batch_image_size.y, + rect_size.x / batch_image_size.x, + -rect_size.y / batch_image_size.y, + ) + } else { + Vec4::new(0.0, 1.0, 1.0, -1.0) + }; + + if extracted_sprite.flip_x { + uv_offset_scale.x += uv_offset_scale.z; + uv_offset_scale.z *= -1.0; + } + if extracted_sprite.flip_y { + uv_offset_scale.y += uv_offset_scale.w; + uv_offset_scale.w *= -1.0; + } + + // Override the size if a custom one is specified + quad_size = custom_size.unwrap_or(quad_size); + + // Used for translation of the quad if `TextureScale::Fit...` is specified. + let mut quad_translation = Vec2::ZERO; + + // Scales the texture based on the `texture_scale` field. + if let Some(scaling_mode) = scaling_mode { + apply_scaling( + scaling_mode, + texture_size, + &mut quad_size, + &mut quad_translation, + &mut uv_offset_scale, + ); + } + + let transform = extracted_sprite.transform.affine() + * Affine3A::from_scale_rotation_translation( + quad_size.extend(1.0), + Quat::IDENTITY, + ((quad_size + quad_translation) * (-anchor - Vec2::splat(0.5))) + .extend(0.0), + ); + + // Store the vertex data and add the item to the render phase + sprite_meta + .sprite_instance_buffer + .push(SpriteInstance::from( + &transform, + &extracted_sprite.color, + &uv_offset_scale, + )); + + current_batch.as_mut().unwrap().get_mut().range.end += 1; + index += 1; + } + ExtractedSpriteKind::Slices { ref indices } => { + for i in indices.clone() { + let slice = &extracted_slices.slices[i]; + let rect = slice.rect; + let rect_size = rect.size(); + + // Calculate vertex data for this item + let mut uv_offset_scale: Vec4; + + // If a rect is specified, adjust UVs and the size of the quad + uv_offset_scale = Vec4::new( + rect.min.x / batch_image_size.x, + rect.max.y / batch_image_size.y, + rect_size.x / batch_image_size.x, + -rect_size.y / batch_image_size.y, + ); + + if extracted_sprite.flip_x { + uv_offset_scale.x += uv_offset_scale.z; + uv_offset_scale.z *= -1.0; + } + if extracted_sprite.flip_y { + uv_offset_scale.y += uv_offset_scale.w; + uv_offset_scale.w *= -1.0; + } + + let transform = extracted_sprite.transform.affine() + * Affine3A::from_scale_rotation_translation( + slice.size.extend(1.0), + Quat::IDENTITY, + (slice.size * -Vec2::splat(0.5) + slice.offset).extend(0.0), + ); + + // Store the vertex data and add the item to the render phase + sprite_meta + .sprite_instance_buffer + .push(SpriteInstance::from( + &transform, + &extracted_sprite.color, + &uv_offset_scale, + )); + + current_batch.as_mut().unwrap().get_mut().range.end += 1; + index += 1; + } + } } - - if extracted_sprite.flip_x { - uv_offset_scale.x += uv_offset_scale.z; - uv_offset_scale.z *= -1.0; - } - if extracted_sprite.flip_y { - uv_offset_scale.y 
+= uv_offset_scale.w; - uv_offset_scale.w *= -1.0; - } - - let transform = extracted_sprite.transform.affine() - * Affine3A::from_scale_rotation_translation( - quad_size.extend(1.0), - Quat::IDENTITY, - ((quad_size + quad_translation) - * (-extracted_sprite.anchor - Vec2::splat(0.5))) - .extend(0.0), - ); - - // Store the vertex data and add the item to the render phase - sprite_meta - .sprite_instance_buffer - .push(SpriteInstance::from( - &transform, - &extracted_sprite.color, - &uv_offset_scale, - )); - transparent_phase.items[batch_item_index] .batch_range_mut() .end += 1; - current_batch.as_mut().unwrap().get_mut().range.end += 1; - index += 1; + } + sprite_meta + .sprite_instance_buffer + .write_buffer(&render_device, &render_queue); + + if sprite_meta.sprite_index_buffer.len() != 6 { + sprite_meta.sprite_index_buffer.clear(); + + // NOTE: This code is creating 6 indices pointing to 4 vertices. + // The vertices form the corners of a quad based on their two least significant bits. + // 10 11 + // + // 00 01 + // The sprite shader can then use the two least significant bits as the vertex index. + // The rest of the properties to transform the vertex positions and UVs (which are + // implicit) are baked into the instance transform, and UV offset and scale. + // See bevy_sprite/src/render/sprite.wgsl for the details. + sprite_meta.sprite_index_buffer.push(2); + sprite_meta.sprite_index_buffer.push(0); + sprite_meta.sprite_index_buffer.push(1); + sprite_meta.sprite_index_buffer.push(1); + sprite_meta.sprite_index_buffer.push(3); + sprite_meta.sprite_index_buffer.push(2); + + sprite_meta + .sprite_index_buffer + .write_buffer(&render_device, &render_queue); } } - sprite_meta - .sprite_instance_buffer - .write_buffer(&render_device, &render_queue); - - if sprite_meta.sprite_index_buffer.len() != 6 { - sprite_meta.sprite_index_buffer.clear(); - - // NOTE: This code is creating 6 indices pointing to 4 vertices. - // The vertices form the corners of a quad based on their two least significant bits. - // 10 11 - // - // 00 01 - // The sprite shader can then use the two least significant bits as the vertex index. - // The rest of the properties to transform the vertex positions and UVs (which are - // implicit) are baked into the instance transform, and UV offset and scale. - // See bevy_sprite/src/render/sprite.wgsl for the details. - sprite_meta.sprite_index_buffer.push(2); - sprite_meta.sprite_index_buffer.push(0); - sprite_meta.sprite_index_buffer.push(1); - sprite_meta.sprite_index_buffer.push(1); - sprite_meta.sprite_index_buffer.push(3); - sprite_meta.sprite_index_buffer.push(2); - - sprite_meta - .sprite_index_buffer - .write_buffer(&render_device, &render_queue); - } } - /// [`RenderCommand`] for sprite rendering. 
pub type DrawSprite = ( SetItemPipeline, diff --git a/crates/bevy_sprite/src/sprite.rs b/crates/bevy_sprite/src/sprite.rs index 59c60071a0..32b8ebb49e 100644 --- a/crates/bevy_sprite/src/sprite.rs +++ b/crates/bevy_sprite/src/sprite.rs @@ -1,9 +1,7 @@ use bevy_asset::{Assets, Handle}; use bevy_color::Color; -use bevy_ecs::{ - component::{require, Component}, - reflect::ReflectComponent, -}; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{component::Component, reflect::ReflectComponent}; use bevy_image::{Image, TextureAtlas, TextureAtlasLayout}; use bevy_math::{Rect, UVec2, Vec2}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -18,7 +16,7 @@ use crate::TextureSlicer; /// Describes a sprite to be rendered to a 2D camera #[derive(Component, Debug, Default, Clone, Reflect)] #[require(Transform, Visibility, SyncToRenderWorld, VisibilityClass)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[component(on_add = view::add_visibility_class::)] pub struct Sprite { /// The image used to render the sprite @@ -157,7 +155,7 @@ impl From> for Sprite { /// Controls how the image is altered when scaled. #[derive(Default, Debug, Clone, Reflect, PartialEq)] -#[reflect(Debug)] +#[reflect(Debug, Default, Clone)] pub enum SpriteImageMode { /// The sprite will take on the size of the image by default, and will be stretched or shrunk if [`Sprite::custom_size`] is set. #[default] @@ -205,7 +203,7 @@ impl SpriteImageMode { /// /// Can be used in [`SpriteImageMode::Scale`]. #[derive(Debug, Clone, Copy, PartialEq, Default, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Default, Clone)] pub enum ScalingMode { /// Scale the texture uniformly (maintain the texture's aspect ratio) /// so that both dimensions (width and height) of the texture will be equal @@ -243,41 +241,37 @@ pub enum ScalingMode { FitEnd, } -/// How a sprite is positioned relative to its [`Transform`]. -/// It defaults to `Anchor::Center`. -#[derive(Component, Debug, Clone, Copy, PartialEq, Default, Reflect)] -#[reflect(Component, Default, Debug, PartialEq)] +/// Normalized (relative to its size) offset of a 2d renderable entity from its [`Transform`]. +#[derive(Component, Debug, Clone, Copy, PartialEq, Deref, DerefMut, Reflect)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[doc(alias = "pivot")] -pub enum Anchor { - #[default] - Center, - BottomLeft, - BottomCenter, - BottomRight, - CenterLeft, - CenterRight, - TopLeft, - TopCenter, - TopRight, - /// Custom anchor point. Top left is `(-0.5, 0.5)`, center is `(0.0, 0.0)`. The value will - /// be scaled with the sprite size. 
- Custom(Vec2), -} +pub struct Anchor(pub Vec2); impl Anchor { + pub const BOTTOM_LEFT: Self = Self(Vec2::new(-0.5, -0.5)); + pub const BOTTOM_CENTER: Self = Self(Vec2::new(0.0, -0.5)); + pub const BOTTOM_RIGHT: Self = Self(Vec2::new(0.5, -0.5)); + pub const CENTER_LEFT: Self = Self(Vec2::new(-0.5, 0.0)); + pub const CENTER: Self = Self(Vec2::ZERO); + pub const CENTER_RIGHT: Self = Self(Vec2::new(0.5, 0.0)); + pub const TOP_LEFT: Self = Self(Vec2::new(-0.5, 0.5)); + pub const TOP_CENTER: Self = Self(Vec2::new(0.0, 0.5)); + pub const TOP_RIGHT: Self = Self(Vec2::new(0.5, 0.5)); + pub fn as_vec(&self) -> Vec2 { - match self { - Anchor::Center => Vec2::ZERO, - Anchor::BottomLeft => Vec2::new(-0.5, -0.5), - Anchor::BottomCenter => Vec2::new(0.0, -0.5), - Anchor::BottomRight => Vec2::new(0.5, -0.5), - Anchor::CenterLeft => Vec2::new(-0.5, 0.0), - Anchor::CenterRight => Vec2::new(0.5, 0.0), - Anchor::TopLeft => Vec2::new(-0.5, 0.5), - Anchor::TopCenter => Vec2::new(0.0, 0.5), - Anchor::TopRight => Vec2::new(0.5, 0.5), - Anchor::Custom(point) => *point, - } + self.0 + } +} + +impl Default for Anchor { + fn default() -> Self { + Self::CENTER + } +} + +impl From for Anchor { + fn from(value: Vec2) -> Self { + Self(value) } } @@ -361,7 +355,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, ..Default::default() }; @@ -383,7 +377,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::TopRight, + anchor: Anchor::TOP_RIGHT, ..Default::default() }; @@ -405,7 +399,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, flip_x: true, ..Default::default() }; @@ -428,7 +422,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::TopRight, + anchor: Anchor::TOP_RIGHT, flip_y: true, ..Default::default() }; @@ -452,7 +446,7 @@ mod tests { let sprite = Sprite { image, rect: Some(Rect::new(1.5, 3.0, 3.0, 9.5)), - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, ..Default::default() }; @@ -476,7 +470,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, texture_atlas: Some(TextureAtlas { layout: texture_atlas, index: 0, @@ -504,7 +498,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, texture_atlas: Some(TextureAtlas { layout: texture_atlas, index: 0, diff --git a/crates/bevy_sprite/src/texture_slice/border_rect.rs b/crates/bevy_sprite/src/texture_slice/border_rect.rs index adc90a626a..00e4fcb8b1 100644 --- a/crates/bevy_sprite/src/texture_slice/border_rect.rs +++ b/crates/bevy_sprite/src/texture_slice/border_rect.rs @@ -1,10 +1,11 @@ -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// Defines the extents of the border of a rectangle. /// /// This struct is used to represent thickness or offsets from the edges /// of a rectangle (left, right, top, and bottom), with values increasing inwards. 
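The `Anchor` component above changes from an enum with fixed variants to a thin `Vec2` newtype with associated constants. A hedged usage sketch of the new API (assumes the `bevy_sprite` and `bevy_math` crates as dependencies; the old variant names appear only in comments for comparison):

```rust
use bevy_math::Vec2;
use bevy_sprite::Anchor;

fn main() {
    // Previously `Anchor::BottomLeft`; now an associated constant.
    let a = Anchor::BOTTOM_LEFT;
    // Previously `Anchor::Custom(Vec2::new(0.25, -0.25))`; now the public tuple field...
    let b = Anchor(Vec2::new(0.25, -0.25));
    // ...or the `From<Vec2>` impl added above.
    let c = Anchor::from(Vec2::splat(0.1));

    println!("{:?} {:?} {:?}", a.as_vec(), b.as_vec(), c.as_vec());
}
```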
#[derive(Default, Copy, Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq, Default)] pub struct BorderRect { /// Extent of the border along the left edge pub left: f32, diff --git a/crates/bevy_sprite/src/texture_slice/computed_slices.rs b/crates/bevy_sprite/src/texture_slice/computed_slices.rs index c258e5f652..f36cf4bfac 100644 --- a/crates/bevy_sprite/src/texture_slice/computed_slices.rs +++ b/crates/bevy_sprite/src/texture_slice/computed_slices.rs @@ -1,12 +1,11 @@ -use crate::{ExtractedSprite, Sprite, SpriteImageMode, TextureAtlasLayout}; +use crate::{ExtractedSlice, Sprite, SpriteImageMode, TextureAtlasLayout}; use super::TextureSlice; use bevy_asset::{AssetEvent, Assets}; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_math::{Rect, Vec2}; -use bevy_platform_support::collections::HashSet; -use bevy_transform::prelude::*; +use bevy_platform::collections::HashSet; /// Component storing texture slices for tiled or sliced sprite entities /// @@ -15,59 +14,33 @@ use bevy_transform::prelude::*; pub struct ComputedTextureSlices(Vec); impl ComputedTextureSlices { - /// Computes [`ExtractedSprite`] iterator from the sprite slices + /// Computes [`ExtractedSlice`] iterator from the sprite slices /// /// # Arguments /// - /// * `transform` - the sprite entity global transform - /// * `original_entity` - the sprite entity /// * `sprite` - The sprite component - /// * `handle` - The sprite texture handle #[must_use] - pub(crate) fn extract_sprites<'a>( + pub(crate) fn extract_slices<'a>( &'a self, - transform: &'a GlobalTransform, - original_entity: Entity, sprite: &'a Sprite, - ) -> impl ExactSizeIterator + 'a { + ) -> impl ExactSizeIterator + 'a { let mut flip = Vec2::ONE; - let [mut flip_x, mut flip_y] = [false; 2]; if sprite.flip_x { flip.x *= -1.0; - flip_x = true; } if sprite.flip_y { flip.y *= -1.0; - flip_y = true; } - self.0.iter().map(move |slice| { - let offset = (slice.offset * flip).extend(0.0); - let transform = transform.mul_transform(Transform::from_translation(offset)); - ExtractedSprite { - original_entity: Some(original_entity), - color: sprite.color.into(), - transform, - rect: Some(slice.texture_rect), - custom_size: Some(slice.draw_size), - flip_x, - flip_y, - image_handle_id: sprite.image.id(), - anchor: Self::redepend_anchor_from_sprite_to_slice(sprite, slice), - scaling_mode: sprite.image_mode.scale(), - } + let anchor = sprite.anchor.as_vec() + * sprite + .custom_size + .unwrap_or(sprite.rect.unwrap_or_default().size()); + self.0.iter().map(move |slice| ExtractedSlice { + offset: slice.offset * flip - anchor, + rect: slice.texture_rect, + size: slice.draw_size, }) } - - fn redepend_anchor_from_sprite_to_slice(sprite: &Sprite, slice: &TextureSlice) -> Vec2 { - let sprite_size = sprite - .custom_size - .unwrap_or(sprite.rect.unwrap_or_default().size()); - if sprite_size == Vec2::ZERO { - sprite.anchor.as_vec() - } else { - sprite.anchor.as_vec() * sprite_size / slice.draw_size - } - } } /// Generates sprite slices for a [`Sprite`] with [`SpriteImageMode::Sliced`] or [`SpriteImageMode::Sliced`]. 
The slices diff --git a/crates/bevy_sprite/src/texture_slice/mod.rs b/crates/bevy_sprite/src/texture_slice/mod.rs index 7ea01d5839..7b1a1e33e2 100644 --- a/crates/bevy_sprite/src/texture_slice/mod.rs +++ b/crates/bevy_sprite/src/texture_slice/mod.rs @@ -27,7 +27,7 @@ impl TextureSlice { /// # Arguments /// /// * `stretch_value` - The slice will repeat when the ratio between the *drawing dimensions* of texture and the - /// *original texture size* (rect) are above `stretch_value`. + /// *original texture size* (rect) are above `stretch_value`. /// * `tile_x` - should the slice be tiled horizontally /// * `tile_y` - should the slice be tiled vertically #[must_use] diff --git a/crates/bevy_sprite/src/texture_slice/slicer.rs b/crates/bevy_sprite/src/texture_slice/slicer.rs index 7250533550..3f8ea1c0b4 100644 --- a/crates/bevy_sprite/src/texture_slice/slicer.rs +++ b/crates/bevy_sprite/src/texture_slice/slicer.rs @@ -1,6 +1,6 @@ use super::{BorderRect, TextureSlice}; use bevy_math::{vec2, Rect, Vec2}; -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// Slices a texture using the **9-slicing** technique. This allows to reuse an image at various sizes /// without needing to prepare multiple assets. The associated texture will be split into nine portions, @@ -11,6 +11,7 @@ use bevy_reflect::Reflect; /// /// See [9-sliced](https://en.wikipedia.org/wiki/9-slice_scaling) textures. #[derive(Debug, Clone, Reflect, PartialEq)] +#[reflect(Clone, PartialEq)] pub struct TextureSlicer { /// Inset values in pixels that define the four slicing lines dividing the texture into nine sections. pub border: BorderRect, @@ -24,6 +25,7 @@ pub struct TextureSlicer { /// Defines how a texture slice scales when resized #[derive(Debug, Copy, Clone, Default, Reflect, PartialEq)] +#[reflect(Clone, PartialEq, Default)] pub enum SliceScaleMode { /// The slice will be stretched to fit the area #[default] diff --git a/crates/bevy_state/Cargo.toml b/crates/bevy_state/Cargo.toml index e6ef17c192..1ae52fa571 100644 --- a/crates/bevy_state/Cargo.toml +++ b/crates/bevy_state/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_state" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Finite state machines for Bevy" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -33,7 +33,7 @@ std = [ "bevy_utils/std", "bevy_reflect?/std", "bevy_app?/std", - "bevy_platform_support/std", + "bevy_platform/std", ] ## `critical-section` provides the building blocks for synchronization primitives @@ -43,17 +43,7 @@ critical-section = [ "bevy_utils/critical-section", "bevy_app?/critical-section", "bevy_reflect?/critical-section", - "bevy_platform_support/critical-section", -] - -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. 
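The 9-slicing described for `TextureSlicer` above splits a texture into four corners, four edges, and a stretched or tiled center. A standalone sketch of how the border insets produce the nine regions (illustrative only, not the crate's implementation):

```rust
// Standalone sketch: split a width/height into the three columns and three rows
// implied by a border inset; adjacent boundary pairs delimit the nine regions.
#[derive(Debug, Clone, Copy)]
struct Border {
    left: f32,
    right: f32,
    top: f32,
    bottom: f32,
}

fn nine_slice_bounds(size: (f32, f32), b: Border) -> ([f32; 4], [f32; 4]) {
    let xs = [0.0, b.left, size.0 - b.right, size.0];
    let ys = [0.0, b.top, size.1 - b.bottom, size.1];
    (xs, ys)
}

fn main() {
    let border = Border { left: 8.0, right: 8.0, top: 8.0, bottom: 8.0 };
    let (xs, ys) = nine_slice_bounds((64.0, 48.0), border);
    println!("column edges: {xs:?}, row edges: {ys:?}");
}
```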
-portable-atomic = [ - "bevy_ecs/portable-atomic", - "bevy_utils/portable-atomic", - "bevy_app?/portable-atomic", - "bevy_reflect?/portable-atomic", - "bevy_platform_support/portable-atomic", + "bevy_platform/critical-section", ] [dependencies] @@ -63,7 +53,7 @@ bevy_state_macros = { path = "macros", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false } bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, optional = true } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } variadics_please = "1.1" # other diff --git a/crates/bevy_state/macros/Cargo.toml b/crates/bevy_state/macros/Cargo.toml index 2b734d2d1c..2f569f395e 100644 --- a/crates/bevy_state/macros/Cargo.toml +++ b/crates/bevy_state/macros/Cargo.toml @@ -2,7 +2,7 @@ name = "bevy_state_macros" version = "0.16.0-dev" description = "Macros for bevy_state" -edition = "2021" +edition = "2024" license = "MIT OR Apache-2.0" [lib] diff --git a/crates/bevy_state/src/app.rs b/crates/bevy_state/src/app.rs index 2419efd4d8..46a23c9f9a 100644 --- a/crates/bevy_state/src/app.rs +++ b/crates/bevy_state/src/app.rs @@ -1,5 +1,5 @@ use bevy_app::{App, MainScheduleOrder, Plugin, PreStartup, PreUpdate, SubApp}; -use bevy_ecs::{event::Events, schedule::IntoSystemConfigs, world::FromWorld}; +use bevy_ecs::{event::Events, schedule::IntoScheduleConfigs, world::FromWorld}; use bevy_utils::once; use log::warn; diff --git a/crates/bevy_state/src/condition.rs b/crates/bevy_state/src/condition.rs index 1d14d9da07..faede71be5 100644 --- a/crates/bevy_state/src/condition.rs +++ b/crates/bevy_state/src/condition.rs @@ -171,7 +171,7 @@ pub fn state_changed(current_state: Option>>) -> bool { #[cfg(test)] mod tests { - use bevy_ecs::schedule::{Condition, IntoSystemConfigs, Schedule}; + use bevy_ecs::schedule::{Condition, IntoScheduleConfigs, Schedule}; use crate::prelude::*; use bevy_state_macros::States; diff --git a/crates/bevy_state/src/state/freely_mutable_state.rs b/crates/bevy_state/src/state/freely_mutable_state.rs index 2bc186ebe0..aef72e15fa 100644 --- a/crates/bevy_state/src/state/freely_mutable_state.rs +++ b/crates/bevy_state/src/state/freely_mutable_state.rs @@ -1,7 +1,7 @@ use bevy_ecs::{ event::EventWriter, prelude::Schedule, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs}, + schedule::IntoScheduleConfigs, system::{Commands, IntoSystem, ResMut}, }; diff --git a/crates/bevy_state/src/state/state_set.rs b/crates/bevy_state/src/state/state_set.rs index ca167d4970..5199662027 100644 --- a/crates/bevy_state/src/state/state_set.rs +++ b/crates/bevy_state/src/state/state_set.rs @@ -1,6 +1,6 @@ use bevy_ecs::{ event::{EventReader, EventWriter}, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs, Schedule}, + schedule::{IntoScheduleConfigs, Schedule}, system::{Commands, IntoSystem, Res, ResMut}, }; use variadics_please::all_tuples; diff --git a/crates/bevy_state/src/state/states.rs b/crates/bevy_state/src/state/states.rs index 8e2422d46a..163e689f0a 100644 --- a/crates/bevy_state/src/state/states.rs +++ b/crates/bevy_state/src/state/states.rs @@ -21,8 +21,8 @@ use core::hash::Hash; /// /// ``` /// use bevy_state::prelude::*; -/// use bevy_ecs::prelude::IntoSystemConfigs; -/// use 
bevy_ecs::system::ResMut; +/// use bevy_ecs::prelude::IntoScheduleConfigs; +/// use bevy_ecs::system::{ResMut, ScheduleSystem}; /// /// /// #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Default, States)] @@ -46,7 +46,7 @@ use core::hash::Hash; /// /// # struct AppMock; /// # impl AppMock { -/// # fn add_systems(&mut self, schedule: S, systems: impl IntoSystemConfigs) {} +/// # fn add_systems(&mut self, schedule: S, systems: impl IntoScheduleConfigs) {} /// # } /// # struct Update; /// # let mut app = AppMock; diff --git a/crates/bevy_state/src/state/transitions.rs b/crates/bevy_state/src/state/transitions.rs index 4c4311a9d5..be28926054 100644 --- a/crates/bevy_state/src/state/transitions.rs +++ b/crates/bevy_state/src/state/transitions.rs @@ -2,7 +2,7 @@ use core::{marker::PhantomData, mem}; use bevy_ecs::{ event::{Event, EventReader, EventWriter}, - schedule::{IntoSystemSetConfigs, Schedule, ScheduleLabel, Schedules, SystemSet}, + schedule::{IntoScheduleConfigs, Schedule, ScheduleLabel, Schedules, SystemSet}, system::{Commands, In, ResMut}, world::World, }; @@ -12,13 +12,13 @@ use super::{resources::State, states::States}; /// The label of a [`Schedule`] that **only** runs whenever [`State`] enters the provided state. /// /// This schedule ignores identity transitions. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct OnEnter(pub S); /// The label of a [`Schedule`] that **only** runs whenever [`State`] exits the provided state. /// /// This schedule ignores identity transitions. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct OnExit(pub S); /// The label of a [`Schedule`] that **only** runs whenever [`State`] @@ -27,7 +27,7 @@ pub struct OnExit(pub S); /// Systems added to this schedule are always ran *after* [`OnExit`], and *before* [`OnEnter`]. /// /// This schedule will run on identity transitions. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct OnTransition { /// The state being exited. pub exited: S, @@ -37,7 +37,7 @@ pub struct OnTransition { /// Runs [state transitions](States). /// -/// By default, it will be triggered after `PreUpdate`, but +/// By default, it will be triggered once before [`PreStartup`] and then each frame after [`PreUpdate`], but /// you can manually trigger it at arbitrary times by creating an exclusive /// system to run the schedule. /// @@ -49,7 +49,10 @@ pub struct OnTransition { /// let _ = world.try_run_schedule(StateTransition); /// } /// ``` -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +/// +/// [`PreStartup`]: https://docs.rs/bevy/latest/bevy/prelude/struct.PreStartup.html +/// [`PreUpdate`]: https://docs.rs/bevy/latest/bevy/prelude/struct.PreUpdate.html +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct StateTransition; /// Event sent when any state transition of `S` happens. @@ -145,7 +148,7 @@ pub(crate) fn internal_apply_state_transition( // Transition events are sent even for same state transitions // Although enter and exit schedules are not run by default. 
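A hedged usage sketch of the transition schedules documented above (assumes the full `bevy` crate with its state plugin; `GameState` and the systems are illustrative names, not part of this diff):

```rust
use bevy::prelude::*;

#[derive(States, Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
enum GameState {
    #[default]
    Menu,
    InGame,
}

fn on_enter_game() { println!("entered InGame"); }
fn on_exit_menu() { println!("exited Menu"); }
fn on_menu_to_game() { println!("Menu -> InGame"); }

fn main() {
    App::new()
        // DefaultPlugins provides the plugin that runs the StateTransition schedule.
        .add_plugins(DefaultPlugins)
        .init_state::<GameState>()
        // Runs only when entering the state (identity transitions are ignored).
        .add_systems(OnEnter(GameState::InGame), on_enter_game)
        // Runs only when leaving the state.
        .add_systems(OnExit(GameState::Menu), on_exit_menu)
        // Runs between OnExit and OnEnter for this specific pair of states.
        .add_systems(
            OnTransition { exited: GameState::Menu, entered: GameState::InGame },
            on_menu_to_game,
        )
        .run();
}
```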
- event.send(StateTransitionEvent { + event.write(StateTransitionEvent { exited: Some(exited.clone()), entered: Some(entered.clone()), }); @@ -154,7 +157,7 @@ pub(crate) fn internal_apply_state_transition( // If the [`State`] resource does not exist, we create it, compute dependent states, send a transition event and register the `OnEnter` schedule. commands.insert_resource(State(entered.clone())); - event.send(StateTransitionEvent { + event.write(StateTransitionEvent { exited: None, entered: Some(entered.clone()), }); @@ -166,7 +169,7 @@ pub(crate) fn internal_apply_state_transition( if let Some(resource) = current_state { commands.remove_resource::>(); - event.send(StateTransitionEvent { + event.write(StateTransitionEvent { exited: Some(resource.get().clone()), entered: None, }); diff --git a/crates/bevy_state/src/state_scoped.rs b/crates/bevy_state/src/state_scoped.rs index 747c2705a2..b58017d6e3 100644 --- a/crates/bevy_state/src/state_scoped.rs +++ b/crates/bevy_state/src/state_scoped.rs @@ -20,6 +20,7 @@ use crate::state::{StateTransitionEvent, States}; /// ``` /// use bevy_state::prelude::*; /// use bevy_ecs::prelude::*; +/// use bevy_ecs::system::ScheduleSystem; /// /// #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Default, States)] /// #[states(scoped_entities)] @@ -44,7 +45,7 @@ use crate::state::{StateTransitionEvent, States}; /// # impl AppMock { /// # fn init_state(&mut self) {} /// # fn enable_state_scoped_entities(&mut self) {} -/// # fn add_systems(&mut self, schedule: S, systems: impl IntoSystemConfigs) {} +/// # fn add_systems(&mut self, schedule: S, systems: impl IntoScheduleConfigs) {} /// # } /// # struct Update; /// # let mut app = AppMock; @@ -53,7 +54,7 @@ use crate::state::{StateTransitionEvent, States}; /// app.add_systems(OnEnter(GameState::InGame), spawn_player); /// ``` #[derive(Component, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component, Clone))] pub struct StateScoped(pub S); impl Default for StateScoped diff --git a/crates/bevy_state/src/state_scoped_events.rs b/crates/bevy_state/src/state_scoped_events.rs index 846db41b3f..c84f5c60bf 100644 --- a/crates/bevy_state/src/state_scoped_events.rs +++ b/crates/bevy_state/src/state_scoped_events.rs @@ -8,7 +8,7 @@ use bevy_ecs::{ system::Commands, world::World, }; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use crate::state::{FreelyMutableState, OnExit, StateTransitionEvent}; diff --git a/crates/bevy_tasks/Cargo.toml b/crates/bevy_tasks/Cargo.toml index 1020c6112d..07c20b9750 100644 --- a/crates/bevy_tasks/Cargo.toml +++ b/crates/bevy_tasks/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_tasks" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "A task executor for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -10,27 +10,39 @@ keywords = ["bevy"] [features] default = ["std", "async_executor"] -std = [ - "futures-lite/std", - "async-task/std", - "edge-executor?/std", - "bevy_platform_support/std", -] + +# Functionality + +## Enables multi-threading support. +## Without this feature, all tasks will be run on a single thread. multi_threaded = ["std", "dep:async-channel", "dep:concurrent-queue"] + +## Uses `async-executor` as a task execution backend. +## This backend is incompatible with `no_std` targets. 
async_executor = ["std", "dep:async-executor"] -edge_executor = ["dep:edge-executor"] -critical-section = [ - "bevy_platform_support/critical-section", - "edge-executor?/critical-section", -] -portable-atomic = [ - "bevy_platform_support/portable-atomic", - "edge-executor?/portable-atomic", - "async-task/portable-atomic", + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. +std = ["futures-lite/std", "async-task/std", "bevy_platform/std"] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = ["bevy_platform/critical-section"] + +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. +web = [ + "bevy_platform/web", + "dep:wasm-bindgen-futures", + "dep:pin-project", + "dep:futures-channel", ] [dependencies] -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } @@ -42,20 +54,31 @@ derive_more = { version = "1", default-features = false, features = [ "deref", "deref_mut", ] } - +cfg-if = "1.0.0" async-executor = { version = "1.11", optional = true } -edge-executor = { version = "0.4.1", default-features = false, optional = true } async-channel = { version = "2.3.0", optional = true } async-io = { version = "2.0.0", optional = true } concurrent-queue = { version = "2.0.0", optional = true } +atomic-waker = { version = "1", default-features = false } +crossbeam-queue = { version = "0.3", default-features = false, features = [ + "alloc", +] } [target.'cfg(target_arch = "wasm32")'.dependencies] -wasm-bindgen-futures = "0.4" -pin-project = "1" -futures-channel = "0.3" +wasm-bindgen-futures = { version = "0.4", optional = true } +pin-project = { version = "1", optional = true } +futures-channel = { version = "0.3", optional = true } -[dev-dependencies] -web-time = { version = "1.1" } +[target.'cfg(not(all(target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", target_has_atomic = "ptr")))'.dependencies] +async-task = { version = "4.4.0", default-features = false, features = [ + "portable-atomic", +] } +heapless = { version = "0.8", default-features = false, features = [ + "portable-atomic", +] } +atomic-waker = { version = "1", default-features = false, features = [ + "portable-atomic", +] } [lints] workspace = true diff --git a/crates/bevy_tasks/README.md b/crates/bevy_tasks/README.md index 2af6a606f6..b03d2fcf97 100644 --- a/crates/bevy_tasks/README.md +++ b/crates/bevy_tasks/README.md @@ -36,7 +36,7 @@ The determining factor for what kind of work should go in each pool is latency r ## `no_std` Support -To enable `no_std` support in this crate, you will need to disable default features, and enable the `edge_executor` and `critical-section` features. For platforms without full support for Rust atomics, you may also need to enable the `portable-atomic` feature. +To enable `no_std` support in this crate, you will need to disable default features, and enable the `edge_executor` and `critical-section` features. 
[bevy]: https://bevyengine.org [rayon]: https://github.com/rayon-rs/rayon diff --git a/crates/bevy_tasks/examples/busy_behavior.rs b/crates/bevy_tasks/examples/busy_behavior.rs index ee92ec3593..8dc56172df 100644 --- a/crates/bevy_tasks/examples/busy_behavior.rs +++ b/crates/bevy_tasks/examples/busy_behavior.rs @@ -2,8 +2,11 @@ //! for 100ms. It's expected to take about a second to run (assuming the machine has >= 4 logical //! cores) +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] + +use bevy_platform::time::Instant; use bevy_tasks::TaskPoolBuilder; -use web_time::{Duration, Instant}; +use core::time::Duration; fn main() { let pool = TaskPoolBuilder::new() diff --git a/crates/bevy_tasks/examples/idle_behavior.rs b/crates/bevy_tasks/examples/idle_behavior.rs index 2887163170..06276e916d 100644 --- a/crates/bevy_tasks/examples/idle_behavior.rs +++ b/crates/bevy_tasks/examples/idle_behavior.rs @@ -2,8 +2,11 @@ //! spinning. Other than the one thread, the system should remain idle, demonstrating good behavior //! for small workloads. +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] + +use bevy_platform::time::Instant; use bevy_tasks::TaskPoolBuilder; -use web_time::{Duration, Instant}; +use core::time::Duration; fn main() { let pool = TaskPoolBuilder::new() diff --git a/crates/bevy_tasks/src/edge_executor.rs b/crates/bevy_tasks/src/edge_executor.rs new file mode 100644 index 0000000000..70e11c8a43 --- /dev/null +++ b/crates/bevy_tasks/src/edge_executor.rs @@ -0,0 +1,652 @@ +//! Alternative to `async_executor` based on [`edge_executor`] by Ivan Markov. +//! +//! It has been vendored along with its tests to update several outdated dependencies. +//! +//! [`async_executor`]: https://github.com/smol-rs/async-executor +//! [`edge_executor`]: https://github.com/ivmarkov/edge-executor + +#![expect(unsafe_code, reason = "original implementation relies on unsafe")] +#![expect( + dead_code, + reason = "keeping methods from original implementation for transparency" +)] + +// TODO: Create a more tailored replacement, possibly integrating [Fotre](https://github.com/NthTensor/Forte) + +use alloc::rc::Rc; +use core::{ + future::{poll_fn, Future}, + marker::PhantomData, + task::{Context, Poll}, +}; + +use async_task::{Runnable, Task}; +use atomic_waker::AtomicWaker; +use bevy_platform::sync::{Arc, LazyLock}; +use futures_lite::FutureExt; + +/// An async executor. +/// +/// # Examples +/// +/// A multi-threaded executor: +/// +/// ```ignore +/// use async_channel::unbounded; +/// use easy_parallel::Parallel; +/// +/// use edge_executor::{Executor, block_on}; +/// +/// let ex: Executor = Default::default(); +/// let (signal, shutdown) = unbounded::<()>(); +/// +/// Parallel::new() +/// // Run four executor threads. +/// .each(0..4, |_| block_on(ex.run(shutdown.recv()))) +/// // Run the main future on the current thread. +/// .finish(|| block_on(async { +/// println!("Hello world!"); +/// drop(signal); +/// })); +/// ``` +pub struct Executor<'a, const C: usize = 64> { + state: LazyLock>>, + _invariant: PhantomData>, +} + +impl<'a, const C: usize> Executor<'a, C> { + /// Creates a new executor. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::Executor; + /// + /// let ex: Executor = Default::default(); + /// ``` + pub const fn new() -> Self { + Self { + state: LazyLock::new(|| Arc::new(State::new())), + _invariant: PhantomData, + } + } + + /// Spawns a task onto the executor. 
+ /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::Executor; + /// + /// let ex: Executor = Default::default(); + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// ``` + /// + /// Note that if the executor's queue size is equal to the number of currently + /// spawned and running tasks, spawning this additional task might cause the executor to panic + /// later, when the task is scheduled for polling. + pub fn spawn(&self, fut: F) -> Task + where + F: Future + Send + 'a, + F::Output: Send + 'a, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.spawn_unchecked(fut) } + } + + /// Attempts to run a task if at least one is scheduled. + /// + /// Running a scheduled task means simply polling its future once. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::Executor; + /// + /// let ex: Executor = Default::default(); + /// assert!(!ex.try_tick()); // no tasks to run + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// assert!(ex.try_tick()); // a task was found + /// ``` + pub fn try_tick(&self) -> bool { + if let Some(runnable) = self.try_runnable() { + runnable.run(); + + true + } else { + false + } + } + + /// Runs a single task asynchronously. + /// + /// Running a task means simply polling its future once. + /// + /// If no tasks are scheduled when this method is called, it will wait until one is scheduled. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{Executor, block_on}; + /// + /// let ex: Executor = Default::default(); + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// block_on(ex.tick()); // runs the task + /// ``` + pub async fn tick(&self) { + self.runnable().await.run(); + } + + /// Runs the executor asynchronously until the given future completes. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{Executor, block_on}; + /// + /// let ex: Executor = Default::default(); + /// + /// let task = ex.spawn(async { 1 + 2 }); + /// let res = block_on(ex.run(async { task.await * 2 })); + /// + /// assert_eq!(res, 6); + /// ``` + pub async fn run(&self, fut: F) -> F::Output + where + F: Future + Send + 'a, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.run_unchecked(fut).await } + } + + /// Waits for the next runnable task to run. + async fn runnable(&self) -> Runnable { + poll_fn(|ctx| self.poll_runnable(ctx)).await + } + + /// Polls the first task scheduled for execution by the executor. + fn poll_runnable(&self, ctx: &Context<'_>) -> Poll { + self.state().waker.register(ctx.waker()); + + if let Some(runnable) = self.try_runnable() { + Poll::Ready(runnable) + } else { + Poll::Pending + } + } + + /// Pops the first task scheduled for execution by the executor. + /// + /// Returns + /// - `None` - if no task was scheduled for execution + /// - `Some(Runnnable)` - the first task scheduled for execution. Calling `Runnable::run` will + /// execute the task. In other words, it will poll its future. 
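Since the vendored module's doc examples above are marked `ignore`, here is a runnable equivalent of the same spawn/run pattern using the non-vendored `async-executor` backend (a sketch, not code from this diff; it assumes `async-executor` and `futures-lite` as dependencies):

```rust
use async_executor::Executor;
use futures_lite::future;

fn main() {
    let ex = Executor::new();

    // Spawning registers the task; it is not polled until the executor is driven.
    let task = ex.spawn(async { 1 + 2 });

    // Drive the executor until the outer future completes.
    let res = future::block_on(ex.run(async { task.await * 2 }));
    assert_eq!(res, 6);
    println!("result: {res}");
}
```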
+ fn try_runnable(&self) -> Option { + let runnable; + + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + { + runnable = self.state().queue.pop(); + } + + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + { + runnable = self.state().queue.dequeue(); + } + + runnable + } + + /// # Safety + /// + /// Original implementation missing safety documentation + unsafe fn spawn_unchecked(&self, fut: F) -> Task + where + F: Future, + { + let schedule = { + let state = self.state().clone(); + + move |runnable| { + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + { + state.queue.push(runnable).unwrap(); + } + + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + { + state.queue.enqueue(runnable).unwrap(); + } + + if let Some(waker) = state.waker.take() { + waker.wake(); + } + } + }; + + // SAFETY: Original implementation missing safety documentation + let (runnable, task) = unsafe { async_task::spawn_unchecked(fut, schedule) }; + + runnable.schedule(); + + task + } + + /// # Safety + /// + /// Original implementation missing safety documentation + async unsafe fn run_unchecked(&self, fut: F) -> F::Output + where + F: Future, + { + let run_forever = async { + loop { + self.tick().await; + } + }; + + run_forever.or(fut).await + } + + /// Returns a reference to the inner state. + fn state(&self) -> &Arc> { + &self.state + } +} + +impl<'a, const C: usize> Default for Executor<'a, C> { + fn default() -> Self { + Self::new() + } +} + +// SAFETY: Original implementation missing safety documentation +unsafe impl<'a, const C: usize> Send for Executor<'a, C> {} +// SAFETY: Original implementation missing safety documentation +unsafe impl<'a, const C: usize> Sync for Executor<'a, C> {} + +/// A thread-local executor. +/// +/// The executor can only be run on the thread that created it. +/// +/// # Examples +/// +/// ```ignore +/// use edge_executor::{LocalExecutor, block_on}; +/// +/// let local_ex: LocalExecutor = Default::default(); +/// +/// block_on(local_ex.run(async { +/// println!("Hello world!"); +/// })); +/// ``` +pub struct LocalExecutor<'a, const C: usize = 64> { + executor: Executor<'a, C>, + _not_send: PhantomData>>, +} + +impl<'a, const C: usize> LocalExecutor<'a, C> { + /// Creates a single-threaded executor. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::LocalExecutor; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// ``` + pub const fn new() -> Self { + Self { + executor: Executor::::new(), + _not_send: PhantomData, + } + } + + /// Spawns a task onto the executor. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::LocalExecutor; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// + /// let task = local_ex.spawn(async { + /// println!("Hello world"); + /// }); + /// ``` + /// + /// Note that if the executor's queue size is equal to the number of currently + /// spawned and running tasks, spawning this additional task might cause the executor to panic + /// later, when the task is scheduled for polling. 
+ pub fn spawn(&self, fut: F) -> Task + where + F: Future + 'a, + F::Output: 'a, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.executor.spawn_unchecked(fut) } + } + + /// Attempts to run a task if at least one is scheduled. + /// + /// Running a scheduled task means simply polling its future once. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::LocalExecutor; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// assert!(!local_ex.try_tick()); // no tasks to run + /// + /// let task = local_ex.spawn(async { + /// println!("Hello world"); + /// }); + /// assert!(local_ex.try_tick()); // a task was found + /// ``` + pub fn try_tick(&self) -> bool { + self.executor.try_tick() + } + + /// Runs a single task asynchronously. + /// + /// Running a task means simply polling its future once. + /// + /// If no tasks are scheduled when this method is called, it will wait until one is scheduled. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{LocalExecutor, block_on}; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// + /// let task = local_ex.spawn(async { + /// println!("Hello world"); + /// }); + /// block_on(local_ex.tick()); // runs the task + /// ``` + pub async fn tick(&self) { + self.executor.tick().await; + } + + /// Runs the executor asynchronously until the given future completes. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{LocalExecutor, block_on}; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// + /// let task = local_ex.spawn(async { 1 + 2 }); + /// let res = block_on(local_ex.run(async { task.await * 2 })); + /// + /// assert_eq!(res, 6); + /// ``` + pub async fn run(&self, fut: F) -> F::Output + where + F: Future, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.executor.run_unchecked(fut) }.await + } +} + +impl<'a, const C: usize> Default for LocalExecutor<'a, C> { + fn default() -> Self { + Self::new() + } +} + +struct State { + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + queue: crossbeam_queue::ArrayQueue, + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + queue: heapless::mpmc::MpMcQueue, + waker: AtomicWaker, +} + +impl State { + fn new() -> Self { + Self { + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + queue: crossbeam_queue::ArrayQueue::new(C), + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + queue: heapless::mpmc::MpMcQueue::new(), + waker: AtomicWaker::new(), + } + } +} + +#[cfg(test)] +mod different_executor_tests { + use core::cell::Cell; + + use futures_lite::future::{block_on, pending, poll_once}; + use futures_lite::pin; + + use super::LocalExecutor; + + #[test] + fn shared_queue_slot() { + block_on(async { + let was_polled = Cell::new(false); + let future = async { + was_polled.set(true); + pending::<()>().await; + }; + + let ex1: LocalExecutor = Default::default(); + let ex2: LocalExecutor = Default::default(); + + // Start the futures for running forever. 
+ let (run1, run2) = (ex1.run(pending::<()>()), ex2.run(pending::<()>())); + pin!(run1); + pin!(run2); + assert!(poll_once(run1.as_mut()).await.is_none()); + assert!(poll_once(run2.as_mut()).await.is_none()); + + // Spawn the future on executor one and then poll executor two. + ex1.spawn(future).detach(); + assert!(poll_once(run2).await.is_none()); + assert!(!was_polled.get()); + + // Poll the first one. + assert!(poll_once(run1).await.is_none()); + assert!(was_polled.get()); + }); + } +} + +#[cfg(test)] +mod drop_tests { + use alloc::string::String; + use core::mem; + use core::sync::atomic::{AtomicUsize, Ordering}; + use core::task::{Poll, Waker}; + use std::sync::Mutex; + + use bevy_platform::sync::LazyLock; + use futures_lite::future; + + use super::{Executor, Task}; + + #[test] + fn leaked_executor_leaks_everything() { + static DROP: AtomicUsize = AtomicUsize::new(0); + static WAKER: LazyLock>> = LazyLock::new(Default::default); + + let ex: Executor = Default::default(); + + let task = ex.spawn(async { + let _guard = CallOnDrop(|| { + DROP.fetch_add(1, Ordering::SeqCst); + }); + + future::poll_fn(|cx| { + *WAKER.lock().unwrap() = Some(cx.waker().clone()); + Poll::Pending::<()> + }) + .await; + }); + + future::block_on(ex.tick()); + assert!(WAKER.lock().unwrap().is_some()); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + + mem::forget(ex); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + + assert!(future::block_on(future::poll_once(task)).is_none()); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + } + + #[test] + fn await_task_after_dropping_executor() { + let s: String = "hello".into(); + + let ex: Executor = Default::default(); + let task: Task<&str> = ex.spawn(async { &*s }); + assert!(ex.try_tick()); + + drop(ex); + assert_eq!(future::block_on(task), "hello"); + drop(s); + } + + #[test] + fn drop_executor_and_then_drop_finished_task() { + static DROP: AtomicUsize = AtomicUsize::new(0); + + let ex: Executor = Default::default(); + let task = ex.spawn(async { + CallOnDrop(|| { + DROP.fetch_add(1, Ordering::SeqCst); + }) + }); + assert!(ex.try_tick()); + + assert_eq!(DROP.load(Ordering::SeqCst), 0); + drop(ex); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + drop(task); + assert_eq!(DROP.load(Ordering::SeqCst), 1); + } + + #[test] + fn drop_finished_task_and_then_drop_executor() { + static DROP: AtomicUsize = AtomicUsize::new(0); + + let ex: Executor = Default::default(); + let task = ex.spawn(async { + CallOnDrop(|| { + DROP.fetch_add(1, Ordering::SeqCst); + }) + }); + assert!(ex.try_tick()); + + assert_eq!(DROP.load(Ordering::SeqCst), 0); + drop(task); + assert_eq!(DROP.load(Ordering::SeqCst), 1); + drop(ex); + assert_eq!(DROP.load(Ordering::SeqCst), 1); + } + + struct CallOnDrop(F); + + impl Drop for CallOnDrop { + fn drop(&mut self) { + (self.0)(); + } + } +} + +#[cfg(test)] +mod local_queue { + use alloc::boxed::Box; + + use futures_lite::{future, pin}; + + use super::Executor; + + #[test] + fn two_queues() { + future::block_on(async { + // Create an executor with two runners. + let ex: Executor = Default::default(); + let (run1, run2) = ( + ex.run(future::pending::<()>()), + ex.run(future::pending::<()>()), + ); + let mut run1 = Box::pin(run1); + pin!(run2); + + // Poll them both. + assert!(future::poll_once(run1.as_mut()).await.is_none()); + assert!(future::poll_once(run2.as_mut()).await.is_none()); + + // Drop the first one, which should leave the local queue in the `None` state. 
+ drop(run1); + assert!(future::poll_once(run2.as_mut()).await.is_none()); + }); + } +} diff --git a/crates/bevy_tasks/src/executor.rs b/crates/bevy_tasks/src/executor.rs index 3c18ccd897..9a9f4f9dfa 100644 --- a/crates/bevy_tasks/src/executor.rs +++ b/crates/bevy_tasks/src/executor.rs @@ -14,21 +14,19 @@ use core::{ }; use derive_more::{Deref, DerefMut}; +cfg_if::cfg_if! { + if #[cfg(feature = "async_executor")] { + type ExecutorInner<'a> = async_executor::Executor<'a>; + type LocalExecutorInner<'a> = async_executor::LocalExecutor<'a>; + } else { + type ExecutorInner<'a> = crate::edge_executor::Executor<'a, 64>; + type LocalExecutorInner<'a> = crate::edge_executor::LocalExecutor<'a, 64>; + } +} + #[cfg(all(feature = "multi_threaded", not(target_arch = "wasm32")))] pub use async_task::FallibleTask; -#[cfg(feature = "async_executor")] -type ExecutorInner<'a> = async_executor::Executor<'a>; - -#[cfg(feature = "async_executor")] -type LocalExecutorInner<'a> = async_executor::LocalExecutor<'a>; - -#[cfg(all(not(feature = "async_executor"), feature = "edge_executor"))] -type ExecutorInner<'a> = edge_executor::Executor<'a, 64>; - -#[cfg(all(not(feature = "async_executor"), feature = "edge_executor"))] -type LocalExecutorInner<'a> = edge_executor::LocalExecutor<'a, 64>; - /// Wrapper around a multi-threading-aware async executor. /// Spawning will generally require tasks to be `Send` and `Sync` to allow multiple /// threads to send/receive/advance tasks. diff --git a/crates/bevy_tasks/src/lib.rs b/crates/bevy_tasks/src/lib.rs index 220f3dcae2..ae684a4eb5 100644 --- a/crates/bevy_tasks/src/lib.rs +++ b/crates/bevy_tasks/src/lib.rs @@ -11,30 +11,28 @@ extern crate std; extern crate alloc; -#[cfg(not(any(feature = "async_executor", feature = "edge_executor")))] -compile_error!("Either of the `async_executor` or the `edge_executor` features must be enabled."); - -#[cfg(not(target_arch = "wasm32"))] mod conditional_send { - /// Use [`ConditionalSend`] to mark an optional Send trait bound. Useful as on certain platforms (eg. Wasm), - /// futures aren't Send. - pub trait ConditionalSend: Send {} - impl ConditionalSend for T {} -} - -#[cfg(target_arch = "wasm32")] -#[expect(missing_docs, reason = "Not all docs are written yet (#3492).")] -mod conditional_send { - pub trait ConditionalSend {} - impl ConditionalSend for T {} + cfg_if::cfg_if! { + if #[cfg(target_arch = "wasm32")] { + /// Use [`ConditionalSend`] to mark an optional Send trait bound. Useful as on certain platforms (eg. Wasm), + /// futures aren't Send. + pub trait ConditionalSend {} + impl ConditionalSend for T {} + } else { + /// Use [`ConditionalSend`] to mark an optional Send trait bound. Useful as on certain platforms (eg. Wasm), + /// futures aren't Send. + pub trait ConditionalSend: Send {} + impl ConditionalSend for T {} + } + } } pub use conditional_send::*; /// Use [`ConditionalSendFuture`] for a future with an optional Send trait bound, as on certain platforms (eg. Wasm), /// futures aren't Send. -pub trait ConditionalSendFuture: core::future::Future + ConditionalSend {} -impl ConditionalSendFuture for T {} +pub trait ConditionalSendFuture: Future + ConditionalSend {} +impl ConditionalSendFuture for T {} use alloc::boxed::Box; @@ -43,44 +41,48 @@ pub type BoxedFuture<'a, T> = core::pin::Pin usize { - std::thread::available_parallelism() - .map(NonZero::::get) - .unwrap_or(1) -} - -/// Gets the logical CPU core count available to the current process. -/// -/// This will always return at least 1. 
-#[cfg(not(feature = "std"))] -pub fn available_parallelism() -> usize { - // Without access to std, assume a single thread is available - 1 + /// Gets the logical CPU core count available to the current process. + /// + /// This is identical to [`std::thread::available_parallelism`], except + /// it will return a default value of 1 if it internally errors out. + /// + /// This will always return at least 1. + pub fn available_parallelism() -> usize { + std::thread::available_parallelism() + .map(NonZero::::get) + .unwrap_or(1) + } + } else { + /// Gets the logical CPU core count available to the current process. + /// + /// This will always return at least 1. + pub fn available_parallelism() -> usize { + // Without access to std, assume a single thread is available + 1 + } + } } diff --git a/crates/bevy_tasks/src/single_threaded_task_pool.rs b/crates/bevy_tasks/src/single_threaded_task_pool.rs index fc1a73e754..0f9488bcd0 100644 --- a/crates/bevy_tasks/src/single_threaded_task_pool.rs +++ b/crates/bevy_tasks/src/single_threaded_task_pool.rs @@ -1,5 +1,5 @@ use alloc::{string::String, vec::Vec}; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use core::{cell::RefCell, future::Future, marker::PhantomData, mem}; use crate::Task; @@ -8,52 +8,14 @@ use crate::Task; use std::thread_local; #[cfg(not(feature = "std"))] -use bevy_platform_support::sync::{Mutex, PoisonError}; +use bevy_platform::sync::{Mutex, PoisonError}; -#[cfg(all( - feature = "std", - any(feature = "async_executor", feature = "edge_executor") -))] +#[cfg(feature = "std")] use crate::executor::LocalExecutor; -#[cfg(all( - not(feature = "std"), - any(feature = "async_executor", feature = "edge_executor") -))] +#[cfg(not(feature = "std"))] use crate::executor::Executor as LocalExecutor; -#[cfg(not(any(feature = "async_executor", feature = "edge_executor")))] -mod dummy_executor { - use async_task::Task; - use core::{future::Future, marker::PhantomData}; - - /// Dummy implementation of a `LocalExecutor` to allow for a cleaner compiler error - /// due to missing feature flags. - #[doc(hidden)] - #[derive(Debug)] - pub struct LocalExecutor<'a>(PhantomData); - - impl<'a> LocalExecutor<'a> { - /// Dummy implementation - pub const fn new() -> Self { - Self(PhantomData) - } - - /// Dummy implementation - pub fn try_tick(&self) -> bool { - unimplemented!() - } - - /// Dummy implementation - pub fn spawn(&self, _: impl Future + 'a) -> Task { - unimplemented!() - } - } -} - -#[cfg(not(any(feature = "async_executor", feature = "edge_executor")))] -use dummy_executor::LocalExecutor; - #[cfg(feature = "std")] thread_local! { static LOCAL_EXECUTOR: LocalExecutor<'static> = const { LocalExecutor::new() }; @@ -237,26 +199,27 @@ impl TaskPool { where T: 'static + MaybeSend + MaybeSync, { - #[cfg(target_arch = "wasm32")] - return Task::wrap_future(future); + cfg_if::cfg_if! 
{ + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + Task::wrap_future(future) + } else if #[cfg(feature = "std")] { + LOCAL_EXECUTOR.with(|executor| { + let task = executor.spawn(future); + // Loop until all tasks are done + while executor.try_tick() {} - #[cfg(all(not(target_arch = "wasm32"), feature = "std"))] - return LOCAL_EXECUTOR.with(|executor| { - let task = executor.spawn(future); - // Loop until all tasks are done - while executor.try_tick() {} + Task::new(task) + }) + } else { + { + let task = LOCAL_EXECUTOR.spawn(future); + // Loop until all tasks are done + while LOCAL_EXECUTOR.try_tick() {} - Task::new(task) - }); - - #[cfg(all(not(target_arch = "wasm32"), not(feature = "std")))] - return { - let task = LOCAL_EXECUTOR.spawn(future); - // Loop until all tasks are done - while LOCAL_EXECUTOR.try_tick() {} - - Task::new(task) - }; + Task::new(task) + } + } + } } /// Spawns a static future on the JS event loop. This is exactly the same as [`TaskPool::spawn`]. diff --git a/crates/bevy_tasks/src/task_pool.rs b/crates/bevy_tasks/src/task_pool.rs index b9c0e9f982..25255a1e5d 100644 --- a/crates/bevy_tasks/src/task_pool.rs +++ b/crates/bevy_tasks/src/task_pool.rs @@ -6,7 +6,7 @@ use std::{ }; use crate::executor::FallibleTask; -use bevy_platform_support::sync::Arc; +use bevy_platform::sync::Arc; use concurrent_queue::ConcurrentQueue; use futures_lite::FutureExt; @@ -74,16 +74,20 @@ impl TaskPoolBuilder { /// This is called on the thread itself and has access to all thread-local storage. /// This will block running async tasks on the thread until the callback completes. pub fn on_thread_spawn(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { - #[cfg(feature = "portable-atomic")] - let arc = { - let boxed = Box::new(f); - let boxed: Box = boxed; - Arc::from(boxed) - }; - - #[cfg(not(feature = "portable-atomic"))] let arc = Arc::new(f); + #[cfg(not(target_has_atomic = "ptr"))] + #[expect( + unsafe_code, + reason = "unsized coercion is an unstable feature for non-std types" + )] + // SAFETY: + // - Coercion from `impl Fn` to `dyn Fn` is valid + // - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw` + let arc = unsafe { + Arc::from_raw(Arc::into_raw(arc) as *const (dyn Fn() + Send + Sync + 'static)) + }; + self.on_thread_spawn = Some(arc); self } @@ -93,16 +97,20 @@ impl TaskPoolBuilder { /// This is called on the thread itself and has access to all thread-local storage. /// This will block thread termination until the callback completes. 
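A hedged usage sketch of the per-thread callbacks described here (assumes `bevy_tasks` with the `multi_threaded` feature; the names and thread count are illustrative):

```rust
use bevy_tasks::TaskPoolBuilder;

fn main() {
    let pool = TaskPoolBuilder::new()
        .num_threads(4)
        .thread_name("demo-pool".to_string())
        // Runs on each worker thread as it starts, before it executes any tasks.
        .on_thread_spawn(|| println!("worker thread started"))
        // Runs on each worker thread as it shuts down.
        .on_thread_destroy(|| println!("worker thread stopping"))
        .build();

    pool.scope(|s| {
        s.spawn(async { println!("task ran on the pool"); });
    });
}
```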
pub fn on_thread_destroy(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { - #[cfg(feature = "portable-atomic")] - let arc = { - let boxed = Box::new(f); - let boxed: Box = boxed; - Arc::from(boxed) - }; - - #[cfg(not(feature = "portable-atomic"))] let arc = Arc::new(f); + #[cfg(not(target_has_atomic = "ptr"))] + #[expect( + unsafe_code, + reason = "unsized coercion is an unstable feature for non-std types" + )] + // SAFETY: + // - Coercion from `impl Fn` to `dyn Fn` is valid + // - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw` + let arc = unsafe { + Arc::from_raw(Arc::into_raw(arc) as *const (dyn Fn() + Send + Sync + 'static)) + }; + self.on_thread_destroy = Some(arc); self } @@ -478,7 +486,7 @@ impl TaskPool { .is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } #[inline] @@ -497,7 +505,7 @@ impl TaskPool { let _result = AssertUnwindSafe(tick_forever).catch_unwind().await.is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } #[inline] @@ -519,7 +527,7 @@ impl TaskPool { .is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } #[inline] @@ -537,7 +545,7 @@ impl TaskPool { let _result = AssertUnwindSafe(tick_forever).catch_unwind().await.is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } /// Spawns a static future onto the thread pool. The returned [`Task`] is a diff --git a/crates/bevy_tasks/src/usages.rs b/crates/bevy_tasks/src/usages.rs index 82da333ef4..8b08d5941c 100644 --- a/crates/bevy_tasks/src/usages.rs +++ b/crates/bevy_tasks/src/usages.rs @@ -1,5 +1,5 @@ use super::TaskPool; -use bevy_platform_support::sync::OnceLock; +use bevy_platform::sync::OnceLock; use core::ops::Deref; macro_rules! taskpool { @@ -81,7 +81,7 @@ taskpool! { /// # Warning /// /// This function *must* be called on the main thread, or the task pools will not be updated appropriately. 
-#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", feature = "web")))] pub fn tick_global_task_pools_on_main_thread() { COMPUTE_TASK_POOL .get() diff --git a/crates/bevy_text/Cargo.toml b/crates/bevy_text/Cargo.toml index a5e8dea07d..2a1a40a74f 100644 --- a/crates/bevy_text/Cargo.toml +++ b/crates/bevy_text/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_text" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides text functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -21,21 +21,19 @@ bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } bevy_log = { path = "../bevy_log", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } bevy_sprite = { path = "../bevy_sprite", version = "0.16.0-dev" } bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", "serialize", ] } # other -cosmic-text = { version = "0.12", features = ["shape-run-cache"] } +cosmic-text = { version = "0.13", features = ["shape-run-cache"] } thiserror = { version = "2", default-features = false } serde = { version = "1", features = ["derive"] } smallvec = "1.13" diff --git a/crates/bevy_text/src/bounds.rs b/crates/bevy_text/src/bounds.rs index 98d42e3a34..db2ceb0b28 100644 --- a/crates/bevy_text/src/bounds.rs +++ b/crates/bevy_text/src/bounds.rs @@ -11,7 +11,7 @@ use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// reliable limit if it is necessary to contain the text strictly in the bounds. Currently this /// component is mainly useful for text wrapping only. #[derive(Component, Copy, Clone, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct TextBounds { /// The maximum width of text in logical pixels. /// If `None`, the width is unbounded. 
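A hedged sketch of the `TextBounds` component documented above (assumes `bevy_text` as a dependency; the entity setup that would carry the component is omitted):

```rust
use bevy_text::TextBounds;

fn main() {
    // Constrain wrapping to a 300-logical-pixel width while leaving the height unbounded.
    let bounds = TextBounds {
        width: Some(300.0),
        height: None,
    };
    println!("{bounds:?}");
}
```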
diff --git a/crates/bevy_text/src/font_atlas.rs b/crates/bevy_text/src/font_atlas.rs index 793cec0d2f..a10dee5923 100644 --- a/crates/bevy_text/src/font_atlas.rs +++ b/crates/bevy_text/src/font_atlas.rs @@ -1,7 +1,7 @@ use bevy_asset::{Assets, Handle}; use bevy_image::{prelude::*, ImageSampler}; use bevy_math::{IVec2, UVec2}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_render::{ render_asset::RenderAssetUsages, render_resource::{Extent3d, TextureDimension, TextureFormat}, @@ -97,7 +97,7 @@ impl FontAtlas { let atlas_layout = atlas_layouts.get_mut(&self.texture_atlas).unwrap(); let atlas_texture = textures.get_mut(&self.texture).unwrap(); - if let Some(glyph_index) = + if let Ok(glyph_index) = self.dynamic_texture_atlas_builder .add_texture(atlas_layout, texture, atlas_texture) { diff --git a/crates/bevy_text/src/font_atlas_set.rs b/crates/bevy_text/src/font_atlas_set.rs index 92b71ef535..1a498127ba 100644 --- a/crates/bevy_text/src/font_atlas_set.rs +++ b/crates/bevy_text/src/font_atlas_set.rs @@ -2,7 +2,7 @@ use bevy_asset::{Asset, AssetEvent, AssetId, Assets}; use bevy_ecs::{event::EventReader, resource::Resource, system::ResMut}; use bevy_image::prelude::*; use bevy_math::{IVec2, UVec2}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::TypePath; use bevy_render::{ render_asset::RenderAssetUsages, diff --git a/crates/bevy_text/src/glyph.rs b/crates/bevy_text/src/glyph.rs index 6de501266c..c761bc0033 100644 --- a/crates/bevy_text/src/glyph.rs +++ b/crates/bevy_text/src/glyph.rs @@ -11,6 +11,7 @@ use bevy_reflect::Reflect; /// /// Used in [`TextPipeline::queue_text`](crate::TextPipeline::queue_text) and [`crate::TextLayoutInfo`] for rendering glyphs. #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct PositionedGlyph { /// The position of the glyph in the text block's bounding box. pub position: Vec2, @@ -20,24 +21,12 @@ pub struct PositionedGlyph { pub atlas_info: GlyphAtlasInfo, /// The index of the glyph in the [`ComputedTextBlock`](crate::ComputedTextBlock)'s tracked spans. pub span_index: usize, - /// TODO: In order to do text editing, we need access to the size of glyphs and their index in the associated String. - /// For example, to figure out where to place the cursor in an input box from the mouse's position. - /// Without this, it's only possible in texts where each glyph is one byte. Cosmic text has methods for this - /// cosmic-texts [hit detection](https://pop-os.github.io/cosmic-text/cosmic_text/struct.Buffer.html#method.hit) - byte_index: usize, -} - -impl PositionedGlyph { - /// Creates a new [`PositionedGlyph`] - pub fn new(position: Vec2, size: Vec2, atlas_info: GlyphAtlasInfo, span_index: usize) -> Self { - Self { - position, - size, - atlas_info, - span_index, - byte_index: 0, - } - } + /// The index of the glyph's line. + pub line_index: usize, + /// The byte index of the glyph in it's line. + pub byte_index: usize, + /// The byte length of the glyph. + pub byte_length: usize, } /// Information about a glyph in an atlas. @@ -47,6 +36,7 @@ impl PositionedGlyph { /// /// Used in [`PositionedGlyph`] and [`FontAtlasSet`](crate::FontAtlasSet). #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct GlyphAtlasInfo { /// A handle to the [`Image`] data for the texture atlas this glyph was placed in. /// @@ -65,6 +55,7 @@ pub struct GlyphAtlasInfo { /// /// Used in [`GlyphAtlasInfo`] and [`FontAtlas`](crate::FontAtlas). 
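With the `new` constructor removed above, `PositionedGlyph` values are now built as plain struct literals that include the new line and byte tracking fields (as the pipeline change later in this diff does). A hedged sketch (assumes `bevy_text` and `bevy_math`; all values are illustrative):

```rust
use bevy_math::Vec2;
use bevy_text::{GlyphAtlasInfo, PositionedGlyph};

// `atlas_info` would normally come from the font atlas set during layout; it is
// taken as a parameter here so the sketch stays self-contained.
fn positioned(atlas_info: GlyphAtlasInfo) -> PositionedGlyph {
    PositionedGlyph {
        position: Vec2::new(10.0, 24.0),
        size: Vec2::new(8.0, 16.0),
        atlas_info,
        span_index: 0,
        // New fields: which line the glyph is on, and where it sits in that line's bytes.
        line_index: 0,
        byte_index: 0,
        byte_length: 1,
    }
}
```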
#[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub struct GlyphAtlasLocation { /// The index of the glyph in the atlas pub glyph_index: usize, diff --git a/crates/bevy_text/src/pipeline.rs b/crates/bevy_text/src/pipeline.rs index e7774569cf..c1f9ca7fe4 100644 --- a/crates/bevy_text/src/pipeline.rs +++ b/crates/bevy_text/src/pipeline.rs @@ -10,7 +10,7 @@ use bevy_ecs::{ use bevy_image::prelude::*; use bevy_log::{once, warn}; use bevy_math::{UVec2, Vec2}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use cosmic_text::{Attrs, Buffer, Family, Metrics, Shaping, Wrap}; @@ -185,13 +185,14 @@ impl TextPipeline { }, ); - buffer.set_rich_text(font_system, spans_iter, Attrs::new(), Shaping::Advanced); + buffer.set_rich_text( + font_system, + spans_iter, + Attrs::new(), + Shaping::Advanced, + Some(justify.into()), + ); - // PERF: https://github.com/pop-os/cosmic-text/issues/166: - // Setting alignment afterwards appears to invalidate some layouting performed by `set_text` which is presumably not free? - for buffer_line in buffer.lines.iter_mut() { - buffer_line.set_align(Some(justify.into())); - } buffer.shape_until_scroll(font_system, false); // Workaround for alignment not working for unbounded text. @@ -263,77 +264,84 @@ impl TextPipeline { let buffer = &mut computed.buffer; let box_size = buffer_dimensions(buffer); - let result = buffer - .layout_runs() - .flat_map(|run| { - run.glyphs - .iter() - .map(move |layout_glyph| (layout_glyph, run.line_y)) - }) - .try_for_each(|(layout_glyph, line_y)| { - let mut temp_glyph; - let span_index = layout_glyph.metadata; - let font_id = glyph_info[span_index].0; - let font_smoothing = glyph_info[span_index].1; + let result = buffer.layout_runs().try_for_each(|run| { + let result = run + .glyphs + .iter() + .map(move |layout_glyph| (layout_glyph, run.line_y, run.line_i)) + .try_for_each(|(layout_glyph, line_y, line_i)| { + let mut temp_glyph; + let span_index = layout_glyph.metadata; + let font_id = glyph_info[span_index].0; + let font_smoothing = glyph_info[span_index].1; - let layout_glyph = if font_smoothing == FontSmoothing::None { - // If font smoothing is disabled, round the glyph positions and sizes, - // effectively discarding all subpixel layout. - temp_glyph = layout_glyph.clone(); - temp_glyph.x = temp_glyph.x.round(); - temp_glyph.y = temp_glyph.y.round(); - temp_glyph.w = temp_glyph.w.round(); - temp_glyph.x_offset = temp_glyph.x_offset.round(); - temp_glyph.y_offset = temp_glyph.y_offset.round(); - temp_glyph.line_height_opt = temp_glyph.line_height_opt.map(f32::round); + let layout_glyph = if font_smoothing == FontSmoothing::None { + // If font smoothing is disabled, round the glyph positions and sizes, + // effectively discarding all subpixel layout. 
+ temp_glyph = layout_glyph.clone(); + temp_glyph.x = temp_glyph.x.round(); + temp_glyph.y = temp_glyph.y.round(); + temp_glyph.w = temp_glyph.w.round(); + temp_glyph.x_offset = temp_glyph.x_offset.round(); + temp_glyph.y_offset = temp_glyph.y_offset.round(); + temp_glyph.line_height_opt = temp_glyph.line_height_opt.map(f32::round); - &temp_glyph - } else { - layout_glyph - }; + &temp_glyph + } else { + layout_glyph + }; - let font_atlas_set = font_atlas_sets.sets.entry(font_id).or_default(); + let font_atlas_set = font_atlas_sets.sets.entry(font_id).or_default(); - let physical_glyph = layout_glyph.physical((0., 0.), 1.); + let physical_glyph = layout_glyph.physical((0., 0.), 1.); - let atlas_info = font_atlas_set - .get_glyph_atlas_info(physical_glyph.cache_key, font_smoothing) - .map(Ok) - .unwrap_or_else(|| { - font_atlas_set.add_glyph_to_atlas( - texture_atlases, - textures, - &mut font_system.0, - &mut swash_cache.0, - layout_glyph, - font_smoothing, - ) - })?; + let atlas_info = font_atlas_set + .get_glyph_atlas_info(physical_glyph.cache_key, font_smoothing) + .map(Ok) + .unwrap_or_else(|| { + font_atlas_set.add_glyph_to_atlas( + texture_atlases, + textures, + &mut font_system.0, + &mut swash_cache.0, + layout_glyph, + font_smoothing, + ) + })?; - let texture_atlas = texture_atlases.get(&atlas_info.texture_atlas).unwrap(); - let location = atlas_info.location; - let glyph_rect = texture_atlas.textures[location.glyph_index]; - let left = location.offset.x as f32; - let top = location.offset.y as f32; - let glyph_size = UVec2::new(glyph_rect.width(), glyph_rect.height()); + let texture_atlas = texture_atlases.get(&atlas_info.texture_atlas).unwrap(); + let location = atlas_info.location; + let glyph_rect = texture_atlas.textures[location.glyph_index]; + let left = location.offset.x as f32; + let top = location.offset.y as f32; + let glyph_size = UVec2::new(glyph_rect.width(), glyph_rect.height()); - // offset by half the size because the origin is center - let x = glyph_size.x as f32 / 2.0 + left + physical_glyph.x as f32; - let y = line_y.round() + physical_glyph.y as f32 - top + glyph_size.y as f32 / 2.0; - let y = match y_axis_orientation { - YAxisOrientation::TopToBottom => y, - YAxisOrientation::BottomToTop => box_size.y - y, - }; + // offset by half the size because the origin is center + let x = glyph_size.x as f32 / 2.0 + left + physical_glyph.x as f32; + let y = + line_y.round() + physical_glyph.y as f32 - top + glyph_size.y as f32 / 2.0; + let y = match y_axis_orientation { + YAxisOrientation::TopToBottom => y, + YAxisOrientation::BottomToTop => box_size.y - y, + }; - let position = Vec2::new(x, y); + let position = Vec2::new(x, y); - // TODO: recreate the byte index, that keeps track of where a cursor is, - // when glyphs are not limited to single byte representation, relevant for #1319 - let pos_glyph = - PositionedGlyph::new(position, glyph_size.as_vec2(), atlas_info, span_index); - layout_info.glyphs.push(pos_glyph); - Ok(()) - }); + let pos_glyph = PositionedGlyph { + position, + size: glyph_size.as_vec2(), + atlas_info, + span_index, + byte_index: layout_glyph.start, + byte_length: layout_glyph.end - layout_glyph.start, + line_index: line_i, + }; + layout_info.glyphs.push(pos_glyph); + Ok(()) + }); + + result + }); // Return the scratch vec. self.glyph_info = glyph_info; @@ -406,7 +414,7 @@ impl TextPipeline { /// Contains scaled glyphs and their size. Generated via [`TextPipeline::queue_text`] when an entity has /// [`TextLayout`] and [`ComputedTextBlock`] components. 
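Editorial aside, not part of the patch: the `line_index`, `byte_index`, and `byte_length` fields populated above are what make byte-accurate cursor work possible downstream. A minimal sketch of how a consumer might use them; `glyph_at_cursor` is a hypothetical helper, not an API introduced by this diff:

```rust
use bevy_text::{PositionedGlyph, TextLayoutInfo};

// Hypothetical helper: find the glyph covering `byte_offset` on `line`,
// using the line/byte bookkeeping the pipeline now records per glyph.
fn glyph_at_cursor(
    layout: &TextLayoutInfo,
    line: usize,
    byte_offset: usize,
) -> Option<&PositionedGlyph> {
    layout.glyphs.iter().find(|glyph| {
        glyph.line_index == line
            && (glyph.byte_index..glyph.byte_index + glyph.byte_length).contains(&byte_offset)
    })
}
```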
#[derive(Component, Clone, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct TextLayoutInfo { /// Scaled and positioned glyphs in screenspace pub glyphs: Vec, diff --git a/crates/bevy_text/src/text.rs b/crates/bevy_text/src/text.rs index f564a3078a..faa5d93dc9 100644 --- a/crates/bevy_text/src/text.rs +++ b/crates/bevy_text/src/text.rs @@ -29,7 +29,7 @@ impl Default for CosmicBuffer { /// /// Returned by [`ComputedTextBlock::entities`]. #[derive(Debug, Copy, Clone, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Clone)] pub struct TextEntity { /// The entity. pub entity: Entity, @@ -43,7 +43,7 @@ pub struct TextEntity { /// /// Automatically updated by 2d and UI text systems. #[derive(Component, Debug, Clone, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct ComputedTextBlock { /// Buffer for managing text layout and creating [`TextLayoutInfo`]. /// @@ -51,7 +51,7 @@ pub struct ComputedTextBlock { /// `TextLayoutInfo`. If you want to control the buffer contents manually or use the `cosmic-text` /// editor, then you need to not use `TextLayout` and instead manually implement the conversion to /// `TextLayoutInfo`. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub(crate) buffer: CosmicBuffer, /// Entities for all text spans in the block, including the root-level text. /// @@ -86,6 +86,16 @@ impl ComputedTextBlock { pub fn needs_rerender(&self) -> bool { self.needs_rerender } + /// Accesses the underlying buffer which can be used for `cosmic-text` APIs such as accessing layout information + /// or calculating a cursor position. + /// + /// Mutable access is not offered because changes would be overwritten during the automated layout calculation. + /// If you want to control the buffer contents manually or use the `cosmic-text` + /// editor, then you need to not use `TextLayout` and instead manually implement the conversion to + /// `TextLayoutInfo`. + pub fn buffer(&self) -> &CosmicBuffer { + &self.buffer + } } impl Default for ComputedTextBlock { @@ -106,7 +116,7 @@ impl Default for ComputedTextBlock { /// /// See [`Text2d`](crate::Text2d) for the core component of 2d text, and `Text` in `bevy_ui` for UI text. #[derive(Component, Debug, Copy, Clone, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(ComputedTextBlock, TextLayoutInfo)] pub struct TextLayout { /// The text's internal alignment. @@ -198,7 +208,7 @@ impl TextLayout { /// )); /// ``` #[derive(Component, Debug, Default, Clone, Deref, DerefMut, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(TextFont, TextColor)] pub struct TextSpan(pub String); @@ -240,7 +250,7 @@ impl From for TextSpan { /// _Has no affect on a single line text entity_, unless used together with a /// [`TextBounds`](super::bounds::TextBounds) component with an explicit `width` value. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, PartialEq, Hash)] pub enum JustifyText { /// Leftmost character is immediately to the right of the render position. /// Bounds start from the render position and advance rightwards. 
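A usage sketch for the new read-only `buffer()` accessor (this system is not part of the diff): cosmic-text's `Buffer::hit` maps a point in buffer space to a cursor, which pairs naturally with the byte-indexed glyphs above. This assumes `CosmicBuffer` still derefs to `cosmic_text::Buffer`; the coordinates are placeholders.

```rust
use bevy_ecs::prelude::*;
use bevy_text::ComputedTextBlock;

// Illustrative system: ask cosmic-text which character lies under a point
// given in the text block's local (buffer) space.
fn report_hit(blocks: Query<&ComputedTextBlock>) {
    for block in &blocks {
        if let Some(cursor) = block.buffer().hit(12.0, 4.0) {
            println!("hit line {} at byte {}", cursor.line, cursor.index);
        }
    }
}
```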
@@ -272,7 +282,7 @@ impl From for cosmic_text::Align { /// `TextFont` determines the style of a text span within a [`ComputedTextBlock`], specifically /// the font face, the font size, and the color. #[derive(Component, Clone, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct TextFont { /// The specific font face to use, as a `Handle` to a [`Font`] asset. /// @@ -350,7 +360,7 @@ impl Default for TextFont { /// /// Default is 1.2x the font size #[derive(Debug, Clone, Copy, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Clone)] pub enum LineHeight { /// Set line height to a specific number of pixels Px(f32), @@ -375,7 +385,7 @@ impl Default for LineHeight { /// The color of the text for this section. #[derive(Component, Copy, Clone, Debug, Deref, DerefMut, Reflect, PartialEq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct TextColor(pub Color); impl Default for TextColor { @@ -399,7 +409,7 @@ impl TextColor { /// Determines how lines will be broken when preventing text from running out of bounds. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, PartialEq, Hash, Default)] pub enum LineBreak { /// Uses the [Unicode Line Breaking Algorithm](https://www.unicode.org/reports/tr14/). /// Lines will be broken up at the nearest suitable word boundary, usually a space. @@ -422,7 +432,7 @@ pub enum LineBreak { /// /// **Note:** Subpixel antialiasing is not currently supported. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, PartialEq, Hash, Default)] #[doc(alias = "antialiasing")] #[doc(alias = "pixelated")] pub enum FontSmoothing { @@ -498,14 +508,14 @@ pub fn detect_text_needs_rerender( // - Span component changed. // - Span TextFont changed. // - Span children changed (can include additions and removals). - for (entity, maybe_span_parent, has_text_block) in changed_spans.iter() { + for (entity, maybe_span_child_of, has_text_block) in changed_spans.iter() { if has_text_block { once!(warn!("found entity {} with a TextSpan that has a TextLayout, which should only be on root \ text entities (that have {}); this warning only prints once", entity, core::any::type_name::())); } - let Some(span_parent) = maybe_span_parent else { + let Some(span_child_of) = maybe_span_child_of else { once!(warn!( "found entity {} with a TextSpan that has no parent; it should have an ancestor \ with a root text component ({}); this warning only prints once", @@ -514,13 +524,13 @@ pub fn detect_text_needs_rerender( )); continue; }; - let mut parent: Entity = span_parent.0; + let mut parent: Entity = span_child_of.parent(); // Search for the nearest ancestor with ComputedTextBlock. // Note: We assume the perf cost from duplicate visits in the case that multiple spans in a block are visited // is outweighed by the expense of tracking visited spans. 
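The hierarchy walk above (continued by the loop below) now reads the parent through the renamed `ChildOf` relationship via `.parent()` rather than tuple-field access. For orientation only, the same accessor pattern sketched in user code; this system is made up and not part of the patch:

```rust
use bevy_ecs::prelude::*;

// Illustrative system: list each entity together with its parent, read
// through the `ChildOf` relationship component.
fn print_parents(children: Query<(Entity, &ChildOf)>) {
    for (entity, child_of) in &children {
        println!("{entity} is a child of {}", child_of.parent());
    }
}
```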
loop { - let Ok((maybe_parent, maybe_computed, has_span)) = computed.get_mut(parent) else { + let Ok((maybe_child_of, maybe_computed, has_span)) = computed.get_mut(parent) else { once!(warn!("found entity {} with a TextSpan that is part of a broken hierarchy with a ChildOf \ component that points at non-existent entity {}; this warning only prints once", entity, parent)); @@ -536,7 +546,7 @@ pub fn detect_text_needs_rerender( entity, parent)); break; } - let Some(next_parent) = maybe_parent else { + let Some(next_child_of) = maybe_child_of else { once!(warn!( "found entity {} with a TextSpan that has no ancestor with the root text \ component ({}); this warning only prints once", @@ -545,7 +555,7 @@ pub fn detect_text_needs_rerender( )); break; }; - parent = next_parent.0; + parent = next_child_of.parent(); } } } diff --git a/crates/bevy_text/src/text2d.rs b/crates/bevy_text/src/text2d.rs index 952627a6b7..a9419e89c0 100644 --- a/crates/bevy_text/src/text2d.rs +++ b/crates/bevy_text/src/text2d.rs @@ -7,10 +7,10 @@ use crate::{ use bevy_asset::Assets; use bevy_color::LinearRgba; use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::entity::hash_set::EntityHashSet; +use bevy_ecs::entity::EntityHashSet; use bevy_ecs::{ change_detection::{DetectChanges, Ref}, - component::{require, Component}, + component::Component, entity::Entity, prelude::{ReflectComponent, With}, query::{Changed, Without}, @@ -26,7 +26,9 @@ use bevy_render::{ view::{NoFrustumCulling, ViewVisibility}, Extract, }; -use bevy_sprite::{Anchor, ExtractedSprite, ExtractedSprites, Sprite}; +use bevy_sprite::{ + Anchor, ExtractedSlice, ExtractedSlices, ExtractedSprite, ExtractedSprites, Sprite, +}; use bevy_transform::components::Transform; use bevy_transform::prelude::GlobalTransform; use bevy_window::{PrimaryWindow, Window}; @@ -81,7 +83,7 @@ use bevy_window::{PrimaryWindow, Window}; /// }); /// ``` #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require( TextLayout, TextFont, @@ -136,6 +138,7 @@ pub type Text2dWriter<'w, 's> = TextWriter<'w, 's, Text2d>; pub fn extract_text2d_sprite( mut commands: Commands, mut extracted_sprites: ResMut, + mut extracted_slices: ResMut, texture_atlases: Extract>>, windows: Extract>>, text2d_query: Extract< @@ -149,17 +152,20 @@ pub fn extract_text2d_sprite( &GlobalTransform, )>, >, - text_styles: Extract>, + text_colors: Extract>, ) { + let mut start = extracted_slices.slices.len(); + let mut end = start + 1; + // TODO: Support window-independent scaling: https://github.com/bevyengine/bevy/issues/5621 let scale_factor = windows - .get_single() + .single() .map(|window| window.resolution.scale_factor()) .unwrap_or(1.0); let scaling = GlobalTransform::from_scale(Vec2::splat(scale_factor.recip()).extend(1.)); for ( - original_entity, + main_entity, view_visibility, computed_block, text_layout_info, @@ -182,15 +188,19 @@ pub fn extract_text2d_sprite( *global_transform * GlobalTransform::from_translation(bottom_left.extend(0.)) * scaling; let mut color = LinearRgba::WHITE; let mut current_span = usize::MAX; - for PositionedGlyph { - position, - atlas_info, - span_index, - .. - } in &text_layout_info.glyphs + + for ( + i, + PositionedGlyph { + position, + atlas_info, + span_index, + .. 
+ }, + ) in text_layout_info.glyphs.iter().enumerate() { if *span_index != current_span { - color = text_styles + color = text_colors .get( computed_block .entities() @@ -198,30 +208,41 @@ pub fn extract_text2d_sprite( .map(|t| t.entity) .unwrap_or(Entity::PLACEHOLDER), ) - .map(|(_, text_color)| LinearRgba::from(text_color.0)) + .map(|text_color| LinearRgba::from(text_color.0)) .unwrap_or_default(); current_span = *span_index; } - let atlas = texture_atlases.get(&atlas_info.texture_atlas).unwrap(); + let rect = texture_atlases + .get(&atlas_info.texture_atlas) + .unwrap() + .textures[atlas_info.location.glyph_index] + .as_rect(); + extracted_slices.slices.push(ExtractedSlice { + offset: *position, + rect, + size: rect.size(), + }); - extracted_sprites.sprites.insert( - ( - commands.spawn(TemporaryRenderEntity).id(), - original_entity.into(), - ), - ExtractedSprite { - transform: transform * GlobalTransform::from_translation(position.extend(0.)), + if text_layout_info.glyphs.get(i + 1).is_none_or(|info| { + info.span_index != current_span || info.atlas_info.texture != atlas_info.texture + }) { + let render_entity = commands.spawn(TemporaryRenderEntity).id(); + extracted_sprites.sprites.push(ExtractedSprite { + main_entity, + render_entity, + transform, color, - rect: Some(atlas.textures[atlas_info.location.glyph_index].as_rect()), - custom_size: None, image_handle_id: atlas_info.texture.id(), flip_x: false, flip_y: false, - anchor: Anchor::Center.as_vec(), - original_entity: Some(original_entity), - scaling_mode: None, - }, - ); + kind: bevy_sprite::ExtractedSpriteKind::Slices { + indices: start..end, + }, + }); + start = end; + } + + end += 1; } } } @@ -256,7 +277,7 @@ pub fn update_text2d_layout( ) { // TODO: Support window-independent scaling: https://github.com/bevyengine/bevy/issues/5621 let scale_factor = windows - .get_single() + .single() .ok() .map(|window| window.resolution.scale_factor()) .or(*last_scale_factor) @@ -371,7 +392,7 @@ mod tests { use bevy_app::{App, Update}; use bevy_asset::{load_internal_binary_asset, Handle}; - use bevy_ecs::schedule::IntoSystemConfigs; + use bevy_ecs::schedule::IntoScheduleConfigs; use crate::{detect_text_needs_rerender, TextIterScratch}; diff --git a/crates/bevy_time/Cargo.toml b/crates/bevy_time/Cargo.toml index f209e01740..520782b519 100644 --- a/crates/bevy_time/Cargo.toml +++ b/crates/bevy_time/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_time" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides time functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -21,11 +21,7 @@ bevy_reflect = [ ] ## Adds serialization support through `serde`. -serialize = [ - "dep:serde", - "bevy_ecs/serialize", - "bevy_platform_support/serialize", -] +serialize = ["dep:serde", "bevy_ecs/serialize", "bevy_platform/serialize"] # Platform Compatibility @@ -37,7 +33,7 @@ std = [ "bevy_reflect?/std", "bevy_ecs/std", "bevy_app/std", - "bevy_platform_support/std", + "bevy_platform/std", "dep:crossbeam-channel", ] @@ -45,28 +41,17 @@ std = [ ## on all platforms, including `no_std`. critical-section = [ "bevy_ecs/critical-section", - "bevy_platform_support/critical-section", + "bevy_platform/critical-section", "bevy_reflect?/critical-section", "bevy_app/critical-section", ] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. 
-portable-atomic = [ - "bevy_ecs/portable-atomic", - "bevy_platform_support/portable-atomic", - "bevy_reflect?/portable-atomic", - "bevy_app/portable-atomic", -] - [dependencies] # bevy bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ - "bevy", -], optional = true } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } # other crossbeam-channel = { version = "0.5.0", default-features = false, features = [ diff --git a/crates/bevy_time/src/common_conditions.rs b/crates/bevy_time/src/common_conditions.rs index bb9e666319..d944303439 100644 --- a/crates/bevy_time/src/common_conditions.rs +++ b/crates/bevy_time/src/common_conditions.rs @@ -7,7 +7,7 @@ use core::time::Duration; /// /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup, Update}; -/// # use bevy_ecs::schedule::IntoSystemConfigs; +/// # use bevy_ecs::schedule::IntoScheduleConfigs; /// # use core::time::Duration; /// # use bevy_time::common_conditions::on_timer; /// fn main() { @@ -47,7 +47,7 @@ pub fn on_timer(duration: Duration) -> impl FnMut(Res

() - ); - } - Self::Never => {} - } - } -} - -/// Trait for manipulating warn policy of systems. -#[doc(hidden)] -pub trait WithParamWarnPolicy -where - M: 'static, - F: SystemParamFunction, - Self: Sized, -{ - /// Set warn policy. - fn with_param_warn_policy(self, warn_policy: ParamWarnPolicy) -> FunctionSystem; - - /// Warn and ignore systems with invalid parameters. - fn warn_param_missing(self) -> FunctionSystem { - self.with_param_warn_policy(ParamWarnPolicy::Warn) - } - - /// Silently ignore systems with invalid parameters. - fn ignore_param_missing(self) -> FunctionSystem { - self.with_param_warn_policy(ParamWarnPolicy::Never) - } -} - -impl WithParamWarnPolicy for F -where - M: 'static, - F: SystemParamFunction, -{ - fn with_param_warn_policy(self, param_warn_policy: ParamWarnPolicy) -> FunctionSystem { - let mut system = IntoSystem::into_system(self); - system.system_meta.set_param_warn_policy(param_warn_policy); - system - } -} - // TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference // (to avoid the need for unwrapping to retrieve SystemMeta) /// Holds on to persistent state required to drive [`SystemParam`] for a [`System`]. @@ -517,7 +417,10 @@ impl SystemState { /// - The passed [`UnsafeWorldCell`] must have read-only access to /// world data in `archetype_component_access`. /// - `world` must be the same [`World`] that was used to initialize [`state`](SystemParam::init_state). - pub unsafe fn validate_param(state: &Self, world: UnsafeWorldCell) -> bool { + pub unsafe fn validate_param( + state: &Self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegated to existing `SystemParam` implementations. unsafe { Param::validate_param(&state.param_state, &state.meta, world) } } @@ -847,18 +750,17 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { let param_state = &self.state.as_ref().expect(Self::ERROR_UNINITIALIZED).param; // SAFETY: // - The caller has invoked `update_archetype_component_access`, which will panic // if the world does not match. // - All world accesses used by `F::Param` have been registered, so the caller // will ensure that there are no data access conflicts. - let is_valid = unsafe { F::Param::validate_param(param_state, &self.system_meta, world) }; - if !is_valid { - self.system_meta.advance_param_warn_policy(); - } - is_valid + unsafe { F::Param::validate_param(param_state, &self.system_meta, world) } } #[inline] @@ -1070,6 +972,7 @@ macro_rules! impl_system_function { #[inline] fn run(&mut self, input: In::Inner<'_>, param_value: SystemParamItem< ($($param,)*)>) -> Out { fn call_inner( + _: PhantomData, mut f: impl FnMut(In::Param<'_>, $($param,)*)->Out, input: In::Inner<'_>, $($param: $param,)* @@ -1077,7 +980,7 @@ macro_rules! impl_system_function { f(In::wrap(input), $($param,)*) } let ($($param,)*) = param_value; - call_inner(self, input, $($param),*) + call_inner(PhantomData::, self, input, $($param),*) } } }; diff --git a/crates/bevy_ecs/src/system/mod.rs b/crates/bevy_ecs/src/system/mod.rs index 4fe489d253..1bdd26add2 100644 --- a/crates/bevy_ecs/src/system/mod.rs +++ b/crates/bevy_ecs/src/system/mod.rs @@ -82,7 +82,7 @@ //! # System return type //! //! Systems added to a schedule through [`add_systems`](crate::schedule::Schedule) may either return -//! 
empty `()` or a [`Result`](crate::result::Result). Other contexts (like one shot systems) allow +//! empty `()` or a [`Result`](crate::error::Result). Other contexts (like one shot systems) allow //! systems to return arbitrary values. //! //! # System parameter list @@ -323,6 +323,7 @@ pub fn assert_system_does_not_conflict>(world: &mut World, system: S) { + fn run_system>( + world: &mut World, + system: S, + ) { let mut schedule = Schedule::default(); schedule.add_systems(system); schedule.run(world); @@ -1648,7 +1655,10 @@ mod tests { #[should_panic] fn panic_inside_system() { let mut world = World::new(); - run_system(&mut world, || panic!("this system panics")); + let system: fn() = || { + panic!("this system panics"); + }; + run_system(&mut world, system); } #[test] @@ -1814,4 +1824,59 @@ mod tests { let mut world = World::new(); run_system(&mut world, sys); } + + // Regression test for + // https://github.com/bevyengine/bevy/issues/18778 + // + // Dear rustc team, please reach out if you encounter this + // in a crater run and we can work something out! + // + // These todo! macro calls should never be removed; + // they're intended to demonstrate real-world usage + // in a way that's clearer than simply calling `panic!` + // + // Because type inference behaves differently for functions and closures, + // we need to test both, in addition to explicitly annotating the return type + // to ensure that there are no upstream regressions there. + #[test] + fn nondiverging_never_trait_impls() { + // This test is a compilation test: + // no meaningful logic is ever actually evaluated. + // It is simply intended to check that the correct traits are implemented + // when todo! or similar nondiverging panics are used. + let mut world = World::new(); + let mut schedule = Schedule::default(); + + fn sys(_query: Query<&Name>) { + todo!() + } + + schedule.add_systems(sys); + schedule.add_systems(|_query: Query<&Name>| {}); + schedule.add_systems(|_query: Query<&Name>| todo!()); + #[expect(clippy::unused_unit, reason = "this forces the () return type")] + schedule.add_systems(|_query: Query<&Name>| -> () { todo!() }); + + fn obs(_trigger: Trigger) { + todo!() + } + + world.add_observer(obs); + world.add_observer(|_trigger: Trigger| {}); + world.add_observer(|_trigger: Trigger| todo!()); + #[expect(clippy::unused_unit, reason = "this forces the () return type")] + world.add_observer(|_trigger: Trigger| -> () { todo!() }); + + fn my_command(_world: &mut World) { + todo!() + } + + world.commands().queue(my_command); + world.commands().queue(|_world: &mut World| {}); + world.commands().queue(|_world: &mut World| todo!()); + #[expect(clippy::unused_unit, reason = "this forces the () return type")] + world + .commands() + .queue(|_world: &mut World| -> () { todo!() }); + } } diff --git a/crates/bevy_ecs/src/system/observer_system.rs b/crates/bevy_ecs/src/system/observer_system.rs index 5dca4f4497..d042154631 100644 --- a/crates/bevy_ecs/src/system/observer_system.rs +++ b/crates/bevy_ecs/src/system/observer_system.rs @@ -1,22 +1,28 @@ +use alloc::{borrow::Cow, vec::Vec}; +use core::marker::PhantomData; + use crate::{ + archetype::ArchetypeComponentId, + component::{ComponentId, Tick}, + error::Result, + never::Never, prelude::{Bundle, Trigger}, - system::System, + query::Access, + schedule::{Fallible, Infallible}, + system::{input::SystemIn, System}, + world::{unsafe_world_cell::UnsafeWorldCell, DeferredWorld, World}, }; -use super::IntoSystem; +use super::{IntoSystem, SystemParamValidationError}; /// 
Implemented for [`System`]s that have a [`Trigger`] as the first argument. -pub trait ObserverSystem: +pub trait ObserverSystem: System, Out = Out> + Send + 'static { } -impl< - E: 'static, - B: Bundle, - Out, - T: System, Out = Out> + Send + 'static, - > ObserverSystem for T +impl ObserverSystem for T where + T: System, Out = Out> + Send + 'static { } @@ -32,7 +38,7 @@ impl< label = "the trait `IntoObserverSystem` is not implemented", note = "for function `ObserverSystem`s, ensure the first argument is a `Trigger` and any subsequent ones are `SystemParam`" )] -pub trait IntoObserverSystem: Send + 'static { +pub trait IntoObserverSystem: Send + 'static { /// The type of [`System`] that this instance converts into. type System: ObserverSystem; @@ -40,23 +46,160 @@ pub trait IntoObserverSystem: Send + 'static fn into_system(this: Self) -> Self::System; } -impl< - S: IntoSystem, Out, M> + Send + 'static, - M, - Out, - E: 'static, - B: Bundle, - > IntoObserverSystem for S +impl IntoObserverSystem for S where + S: IntoSystem, Out, M> + Send + 'static, S::System: ObserverSystem, + E: 'static, + B: Bundle, { - type System = , Out, M>>::System; + type System = S::System; fn into_system(this: Self) -> Self::System { IntoSystem::into_system(this) } } +impl IntoObserverSystem for S +where + S: IntoSystem, (), M> + Send + 'static, + S::System: ObserverSystem, + E: Send + Sync + 'static, + B: Bundle, +{ + type System = InfallibleObserverWrapper; + + fn into_system(this: Self) -> Self::System { + InfallibleObserverWrapper::new(IntoSystem::into_system(this)) + } +} +impl IntoObserverSystem for S +where + S: IntoSystem, Never, M> + Send + 'static, + E: Send + Sync + 'static, + B: Bundle, +{ + type System = InfallibleObserverWrapper; + + fn into_system(this: Self) -> Self::System { + InfallibleObserverWrapper::new(IntoSystem::into_system(this)) + } +} + +/// A wrapper that converts an observer system that returns `()` into one that returns `Ok(())`. +pub struct InfallibleObserverWrapper { + observer: S, + _marker: PhantomData<(E, B, Out)>, +} + +impl InfallibleObserverWrapper { + /// Create a new `InfallibleObserverWrapper`. 
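Editorial aside before the wrapper's constructor continues below: together, these blanket impls mean both fallible and infallible observers can be registered directly, with `()`-returning ones wrapped so they also produce a `Result`. A minimal sketch under that reading; the `Explode` event and `register` function are invented for illustration:

```rust
use bevy_ecs::error::Result;
use bevy_ecs::prelude::*;

#[derive(Event)]
struct Explode;

// Illustrative setup: the `()`-returning closure goes through
// `InfallibleObserverWrapper`, so both observers end up as systems
// whose output type is `Result`.
fn register(world: &mut World) {
    world.add_observer(|_: Trigger<Explode>| {});
    world.add_observer(|_: Trigger<Explode>| -> Result { Ok(()) });
}
```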
+ pub fn new(observer: S) -> Self { + Self { + observer, + _marker: PhantomData, + } + } +} + +impl System for InfallibleObserverWrapper +where + S: ObserverSystem, + E: Send + Sync + 'static, + B: Bundle, + Out: Send + Sync + 'static, +{ + type In = Trigger<'static, E, B>; + type Out = Result; + + #[inline] + fn name(&self) -> Cow<'static, str> { + self.observer.name() + } + + #[inline] + fn component_access(&self) -> &Access { + self.observer.component_access() + } + + #[inline] + fn archetype_component_access(&self) -> &Access { + self.observer.archetype_component_access() + } + + #[inline] + fn is_send(&self) -> bool { + self.observer.is_send() + } + + #[inline] + fn is_exclusive(&self) -> bool { + self.observer.is_exclusive() + } + + #[inline] + fn has_deferred(&self) -> bool { + self.observer.has_deferred() + } + + #[inline] + unsafe fn run_unsafe( + &mut self, + input: SystemIn<'_, Self>, + world: UnsafeWorldCell, + ) -> Self::Out { + self.observer.run_unsafe(input, world); + Ok(()) + } + + #[inline] + fn apply_deferred(&mut self, world: &mut World) { + self.observer.apply_deferred(world); + } + + #[inline] + fn queue_deferred(&mut self, world: DeferredWorld) { + self.observer.queue_deferred(world); + } + + #[inline] + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { + self.observer.validate_param_unsafe(world) + } + + #[inline] + fn initialize(&mut self, world: &mut World) { + self.observer.initialize(world); + } + + #[inline] + fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) { + self.observer.update_archetype_component_access(world); + } + + #[inline] + fn check_change_tick(&mut self, change_tick: Tick) { + self.observer.check_change_tick(change_tick); + } + + #[inline] + fn get_last_run(&self) -> Tick { + self.observer.get_last_run() + } + + #[inline] + fn set_last_run(&mut self, last_run: Tick) { + self.observer.set_last_run(last_run); + } + + fn default_system_sets(&self) -> Vec { + self.observer.default_system_sets() + } +} + #[cfg(test)] mod tests { use crate::{ diff --git a/crates/bevy_ecs/src/system/query.rs b/crates/bevy_ecs/src/system/query.rs index ecca479d3b..183bdecfb4 100644 --- a/crates/bevy_ecs/src/system/query.rs +++ b/crates/bevy_ecs/src/system/query.rs @@ -1,46 +1,54 @@ use crate::{ batching::BatchingStrategy, component::Tick, - entity::{Entity, EntityBorrow, EntitySet}, + entity::{Entity, EntityDoesNotExistError, EntityEquivalent, EntitySet, UniqueEntityArray}, query::{ - QueryCombinationIter, QueryData, QueryEntityError, QueryFilter, QueryIter, QueryManyIter, - QueryManyUniqueIter, QueryParIter, QuerySingleError, QueryState, ROQueryItem, - ReadOnlyQueryData, + DebugCheckedUnwrap, NopWorldQuery, QueryCombinationIter, QueryData, QueryEntityError, + QueryFilter, QueryIter, QueryManyIter, QueryManyUniqueIter, QueryParIter, QueryParManyIter, + QueryParManyUniqueIter, QuerySingleError, QueryState, ROQueryItem, ReadOnlyQueryData, }, world::unsafe_world_cell::UnsafeWorldCell, }; use core::{ marker::PhantomData, + mem::MaybeUninit, ops::{Deref, DerefMut}, }; -/// [System parameter] that provides selective access to the [`Component`] data stored in a [`World`]. +/// A [system parameter] that provides selective access to the [`Component`] data stored in a [`World`]. /// -/// Enables access to [entity identifiers] and [components] from a system, without the need to directly access the world. -/// Its iterators and getter methods return *query items*. 
-/// Each query item is a type containing data relative to an entity. +/// Queries enable systems to access [entity identifiers] and [components] without requiring direct access to the [`World`]. +/// Its iterators and getter methods return *query items*, which are types containing data related to an entity. /// /// `Query` is a generic data structure that accepts two type parameters: /// -/// - **`D` (query data).** -/// The type of data contained in the query item. +/// - **`D` (query data)**: +/// The type of data fetched by the query, which will be returned as the query item. /// Only entities that match the requested data will generate an item. /// Must implement the [`QueryData`] trait. -/// - **`F` (query filter).** -/// A set of conditions that determines whether query items should be kept or discarded. +/// - **`F` (query filter)**: +/// An optional set of conditions that determine whether query items should be kept or discarded. +/// This defaults to [`unit`], which means no additional filters will be applied. /// Must implement the [`QueryFilter`] trait. -/// This type parameter is optional. /// +/// [system parameter]: crate::system::SystemParam +/// [`Component`]: crate::component::Component /// [`World`]: crate::world::World +/// [entity identifiers]: Entity +/// [components]: crate::component::Component /// /// # Similar parameters /// -/// [`Query`] has few sibling [`SystemParam`](crate::system::system_param::SystemParam)s, which perform additional validation: +/// `Query` has few sibling [`SystemParam`]s, which perform additional validation: +/// /// - [`Single`] - Exactly one matching query item. /// - [`Option`] - Zero or one matching query item. /// - [`Populated`] - At least one matching query item. /// -/// Those parameters will prevent systems from running if their requirements aren't met. +/// These parameters will prevent systems from running if their requirements are not met. +/// +/// [`SystemParam`]: crate::system::system_param::SystemParam +/// [`Option`]: Single /// /// # System parameter declaration /// @@ -49,330 +57,428 @@ use core::{ /// /// ## Component access /// -/// A query defined with a reference to a component as the query fetch type parameter can be used to generate items that refer to the data of said component. +/// You can fetch an entity's component by specifying a reference to that component in the query's data parameter: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn immutable_ref( -/// // A component can be accessed by shared reference... -/// query: Query<&ComponentA> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(immutable_ref); +/// # +/// // A component can be accessed by a shared reference... +/// fn immutable_query(query: Query<&ComponentA>) { +/// // ... +/// } /// -/// # fn mutable_ref( -/// // ... or by mutable reference. -/// query: Query<&mut ComponentA> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(mutable_ref); +/// // ...or by a mutable reference. +/// fn mutable_query(query: Query<&mut ComponentA>) { +/// // ... 
+/// } +/// # +/// # bevy_ecs::system::assert_is_system(immutable_query); +/// # bevy_ecs::system::assert_is_system(mutable_query); +/// ``` +/// +/// Note that components need to be behind a reference (`&` or `&mut`), or the query will not compile: +/// +/// ```compile_fail,E0277 +/// # use bevy_ecs::prelude::*; +/// # +/// # #[derive(Component)] +/// # struct ComponentA; +/// # +/// // This needs to be `&ComponentA` or `&mut ComponentA` in order to compile. +/// fn invalid_query(query: Query) { +/// // ... +/// } /// ``` /// /// ## Query filtering /// -/// Setting the query filter type parameter will ensure that each query item satisfies the given condition. +/// Setting the query filter type parameter will ensure that each query item satisfies the given condition: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; -/// # fn system( -/// // Just `ComponentA` data will be accessed, but only for entities that also contain -/// // `ComponentB`. -/// query: Query<&ComponentA, With> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// // `ComponentA` data will be accessed, but only for entities that also contain `ComponentB`. +/// fn filtered_query(query: Query<&ComponentA, With>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(filtered_query); /// ``` /// +/// Note that the filter is `With`, not `With<&ComponentB>`. Unlike query data, `With` +/// does require components to be behind a reference. +/// /// ## `QueryData` or `QueryFilter` tuples /// -/// Using tuples, each `Query` type parameter can contain multiple elements. +/// Using [`tuple`]s, each `Query` type parameter can contain multiple elements. /// -/// In the following example, two components are accessed simultaneously, and the query items are filtered on two conditions. +/// In the following example two components are accessed simultaneously, and the query items are +/// filtered on two conditions: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; +/// # /// # #[derive(Component)] /// # struct ComponentC; +/// # /// # #[derive(Component)] /// # struct ComponentD; -/// # fn immutable_ref( -/// query: Query<(&ComponentA, &ComponentB), (With, Without)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(immutable_ref); +/// # +/// fn complex_query( +/// query: Query<(&mut ComponentA, &ComponentB), (With, Without)> +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(complex_query); +/// ``` +/// +/// Note that this currently only works on tuples with 15 or fewer items. You may nest tuples to +/// get around this limit: +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # +/// # #[derive(Component)] +/// # struct ComponentA; +/// # +/// # #[derive(Component)] +/// # struct ComponentB; +/// # +/// # #[derive(Component)] +/// # struct ComponentC; +/// # +/// # #[derive(Component)] +/// # struct ComponentD; +/// # +/// fn nested_query( +/// query: Query<(&ComponentA, &ComponentB, (&mut ComponentC, &mut ComponentD))> +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(nested_query); /// ``` /// /// ## Entity identifier access /// -/// The identifier of an entity can be made available inside the query item by including [`Entity`] in the query fetch type parameter. 
+/// You can access [`Entity`], the entity identifier, by including it in the query data parameter: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// query: Query<(Entity, &ComponentA)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// fn entity_id_query(query: Query<(Entity, &ComponentA)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(entity_id_query); /// ``` /// +/// Be aware that [`Entity`] is not a component, so it does not need to be behind a reference. +/// /// ## Optional component access /// -/// A component can be made optional in a query by wrapping it into an [`Option`]. -/// In this way, a query item can still be generated even if the queried entity does not contain the wrapped component. -/// In this case, its corresponding value will be `None`. +/// A component can be made optional by wrapping it into an [`Option`]. In the following example, a +/// query item will still be generated even if the queried entity does not contain `ComponentB`. +/// When this is the case, `Option<&ComponentB>`'s corresponding value will be `None`. /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; -/// # fn system( -/// // Generates items for entities that contain `ComponentA`, and optionally `ComponentB`. -/// query: Query<(&ComponentA, Option<&ComponentB>)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// // A queried items must contain `ComponentA`. If they also contain `ComponentB`, its value will +/// // be fetched as well. +/// fn optional_component_query(query: Query<(&ComponentA, Option<&ComponentB>)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(optional_component_query); /// ``` /// -/// See the documentation for [`AnyOf`] to idiomatically declare many optional components. +/// Optional components can hurt performance in some cases, so please read the [performance] +/// section to learn more about them. Additionally, if you need to declare several optional +/// components, you may be interested in using [`AnyOf`]. /// -/// See the [performance] section to learn more about the impact of optional components. +/// [performance]: #performance +/// [`AnyOf`]: crate::query::AnyOf /// /// ## Disjoint queries /// -/// A system cannot contain two queries that break Rust's mutability rules. -/// In this case, the [`Without`] filter can be used to disjoint them. +/// A system cannot contain two queries that break Rust's mutability rules, or else it will panic +/// when initialized. This can often be fixed with the [`Without`] filter, which makes the queries +/// disjoint. /// -/// In the following example, two queries mutably access the same component. -/// Executing this system will panic, since an entity could potentially match the two queries at the same time by having both `Player` and `Enemy` components. -/// This would violate mutability rules. +/// In the following example, the two queries can mutably access the same `&mut Health` component +/// if an entity has both the `Player` and `Enemy` components. 
Bevy will catch this and panic, +/// however, instead of breaking Rust's mutability rules: /// /// ```should_panic /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct Health; +/// # /// # #[derive(Component)] /// # struct Player; +/// # /// # #[derive(Component)] /// # struct Enemy; /// # /// fn randomize_health( /// player_query: Query<&mut Health, With>, /// enemy_query: Query<&mut Health, With>, -/// ) -/// # {} -/// # let mut randomize_health_system = IntoSystem::into_system(randomize_health); -/// # let mut world = World::new(); -/// # randomize_health_system.initialize(&mut world); -/// # randomize_health_system.run((), &mut world); +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(randomize_health); /// ``` /// -/// Adding a `Without` filter will disjoint the queries. -/// In this way, any entity that has both `Player` and `Enemy` components is excluded from both queries. +/// Adding a [`Without`] filter will disjoint the queries. In the following example, any entity +/// that has both the `Player` and `Enemy` components will be excluded from _both_ queries: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct Health; +/// # /// # #[derive(Component)] /// # struct Player; +/// # /// # #[derive(Component)] /// # struct Enemy; /// # /// fn randomize_health( /// player_query: Query<&mut Health, (With, Without)>, /// enemy_query: Query<&mut Health, (With, Without)>, -/// ) -/// # {} -/// # let mut randomize_health_system = IntoSystem::into_system(randomize_health); -/// # let mut world = World::new(); -/// # randomize_health_system.initialize(&mut world); -/// # randomize_health_system.run((), &mut world); +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(randomize_health); /// ``` /// -/// An alternative to this idiom is to wrap the conflicting queries into a [`ParamSet`](super::ParamSet). +/// An alternative solution to this problem would be to wrap the conflicting queries in +/// [`ParamSet`]. +/// +/// [`Without`]: crate::query::Without +/// [`ParamSet`]: crate::system::ParamSet /// /// ## Whole Entity Access /// -/// [`EntityRef`]s can be fetched from a query. This will give read-only access to any component on the entity, -/// and can be used to dynamically fetch any component without baking it into the query type. Due to this global -/// access to the entity, this will block any other system from parallelizing with it. As such these queries -/// should be sparingly used. +/// [`EntityRef`] can be used in a query to gain read-only access to all components of an entity. +/// This is useful when dynamically fetching components instead of baking them into the query type. /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// query: Query<(EntityRef, &ComponentA)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// fn all_components_query(query: Query<(EntityRef, &ComponentA)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(all_components_query); /// ``` /// -/// As `EntityRef` can read any component on an entity, a query using it will conflict with *any* mutable -/// access. It is strongly advised to couple `EntityRef` queries with the use of either `With`/`Without` -/// filters or `ParamSets`. 
This also limits the scope of the query, which will improve iteration performance -/// and also allows it to parallelize with other non-conflicting systems. +/// As [`EntityRef`] can read any component on an entity, a query using it will conflict with *any* +/// mutable component access. /// /// ```should_panic /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// // This will panic! -/// // EntityRef provides read access to ALL components on an entity. -/// // When combined with &mut ComponentA in the same query, it creates -/// // a conflict because EntityRef could read ComponentA while the &mut -/// // attempts to modify it - violating Rust's borrowing rules of no -/// // simultaneous read+write access. -/// query: Query<(EntityRef, &mut ComponentA)> -/// # ) {} -/// # bevy_ecs::system::assert_system_does_not_conflict(system); +/// # +/// // `EntityRef` provides read access to *all* components on an entity. When combined with +/// // `&mut ComponentA` in the same query, it creates a conflict because `EntityRef` could read +/// // `&ComponentA` while `&mut ComponentA` attempts to modify it - violating Rust's borrowing +/// // rules. +/// fn invalid_query(query: Query<(EntityRef, &mut ComponentA)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(invalid_query); /// ``` +/// +/// It is strongly advised to couple [`EntityRef`] queries with the use of either [`With`] / +/// [`Without`] filters or [`ParamSet`]s. Not only does this improve the performance and +/// parallelization of the system, but it enables systems to gain mutable access to other +/// components: +/// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; -/// # fn system( -/// // This will not panic. -/// // This creates a perfect separation where: -/// // 1. First query reads entities that have ComponentA -/// // 2. Second query modifies ComponentB only on entities that DON'T have ComponentA -/// // Result: No entity can ever be accessed by both queries simultaneously -/// query_a: Query>, -/// query_b: Query<&mut ComponentB, Without>, -/// # ) {} -/// # bevy_ecs::system::assert_system_does_not_conflict(system); +/// # +/// // The first query only reads entities that have `ComponentA`, while the second query only +/// // modifies entities that *don't* have `ComponentA`. Because neither query will access the same +/// // entity, this system does not conflict. +/// fn disjoint_query( +/// query_a: Query>, +/// query_b: Query<&mut ComponentB, Without>, +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(disjoint_query); /// ``` +/// /// The fundamental rule: [`EntityRef`]'s ability to read all components means it can never -/// coexist with mutable access. With/Without filters guarantee this by keeping the +/// coexist with mutable access. [`With`] / [`Without`] filters can guarantee this by keeping the /// queries on completely separate entities. /// +/// [`EntityRef`]: crate::world::EntityRef +/// [`With`]: crate::query::With +/// /// # Accessing query items /// -/// The following table summarizes the behavior of the safe methods that can be used to get query items. 
+/// The following table summarizes the behavior of safe methods that can be used to get query +/// items: /// /// |Query methods|Effect| -/// |:---:|---| -/// |[`iter`]\[[`_mut`][`iter_mut`]]|Returns an iterator over all query items.| -/// |[[`iter().for_each()`][`for_each`]\[[`iter_mut().for_each()`][`for_each`]],
[`par_iter`]\[[`_mut`][`par_iter_mut`]]|Runs a specified function for each query item.| -/// |[`iter_many`]\[[`_mut`][`iter_many_mut`]]|Iterates or runs a specified function over query items generated by a list of entities.| -/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]]|Returns an iterator over all combinations of a specified number of query items.| -/// |[`get`]\[[`_mut`][`get_mut`]]|Returns the query item for the specified entity.| -/// |[`many`]\[[`_mut`][`many_mut`]],
[`get_many`]\[[`_mut`][`get_many_mut`]]|Returns the query items for the specified entities.| -/// |[`single`]\[[`_mut`][`single_mut`]],
[`get_single`]\[[`_mut`][`get_single_mut`]]|Returns the query item while verifying that there aren't others.| +/// |-|-| +/// |[`iter`]\[[`_mut`][`iter_mut`]\]|Returns an iterator over all query items.| +/// |[`iter[_mut]().for_each()`][`for_each`],
[`par_iter`]\[[`_mut`][`par_iter_mut`]\]|Runs a specified function for each query item.| +/// |[`iter_many`]\[[`_unique`][`iter_many_unique`]\]\[[`_mut`][`iter_many_mut`]\]|Iterates over query items that match a list of entities.| +/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]\]|Iterates over all combinations of query items.| +/// |[`single`](Self::single)\[[`_mut`][`single_mut`]\]|Returns a single query item if only one exists.| +/// |[`get`]\[[`_mut`][`get_mut`]\]|Returns the query item for a specified entity.| +/// |[`get_many`]\[[`_unique`][`get_many_unique`]\]\[[`_mut`][`get_many_mut`]\]|Returns all query items that match a list of entities.| /// /// There are two methods for each type of query operation: immutable and mutable (ending with `_mut`). /// When using immutable methods, the query items returned are of type [`ROQueryItem`], a read-only version of the query item. /// In this circumstance, every mutable reference in the query fetch type parameter is substituted by a shared reference. /// +/// [`iter`]: Self::iter +/// [`iter_mut`]: Self::iter_mut +/// [`for_each`]: #iteratorfor_each +/// [`par_iter`]: Self::par_iter +/// [`par_iter_mut`]: Self::par_iter_mut +/// [`iter_many`]: Self::iter_many +/// [`iter_many_unique`]: Self::iter_many_unique +/// [`iter_many_mut`]: Self::iter_many_mut +/// [`iter_combinations`]: Self::iter_combinations +/// [`iter_combinations_mut`]: Self::iter_combinations_mut +/// [`single_mut`]: Self::single_mut +/// [`get`]: Self::get +/// [`get_mut`]: Self::get_mut +/// [`get_many`]: Self::get_many +/// [`get_many_unique`]: Self::get_many_unique +/// [`get_many_mut`]: Self::get_many_mut +/// /// # Performance /// -/// Creating a `Query` is a low-cost constant operation. -/// Iterating it, on the other hand, fetches data from the world and generates items, which can have a significant computational cost. +/// Creating a `Query` is a low-cost constant operation. Iterating it, on the other hand, fetches +/// data from the world and generates items, which can have a significant computational cost. /// -/// [`Table`] component storage type is much more optimized for query iteration than [`SparseSet`]. +/// Two systems cannot be executed in parallel if both access the same component type where at +/// least one of the accesses is mutable. Because of this, it is recommended for queries to only +/// fetch mutable access to components when necessary, since immutable access can be parallelized. /// -/// Two systems cannot be executed in parallel if both access the same component type where at least one of the accesses is mutable. -/// This happens unless the executor can verify that no entity could be found in both queries. +/// Query filters ([`With`] / [`Without`]) can improve performance because they narrow the kinds of +/// entities that can be fetched. Systems that access fewer kinds of entities are more likely to be +/// parallelized by the scheduler. /// -/// Optional components increase the number of entities a query has to match against. -/// This can hurt iteration performance, especially if the query solely consists of only optional components, since the query would iterate over each entity in the world. +/// On the other hand, be careful using optional components (`Option<&ComponentA>`) and +/// [`EntityRef`] because they broaden the amount of entities kinds that can be accessed. 
This is +/// especially true of a query that _only_ fetches optional components or [`EntityRef`], as the +/// query would iterate over all entities in the world. /// -/// The following table compares the computational complexity of the various methods and operations, where: +/// There are two types of [component storage types]: [`Table`] and [`SparseSet`]. [`Table`] offers +/// fast iteration speeds, but slower insertion and removal speeds. [`SparseSet`] is the opposite: +/// it offers fast component insertion and removal speeds, but slower iteration speeds. /// -/// - **n** is the number of entities that match the query, -/// - **r** is the number of elements in a combination, -/// - **k** is the number of involved entities in the operation, -/// - **a** is the number of archetypes in the world, -/// - **C** is the [binomial coefficient], used to count combinations. -/// nCr is read as "*n* choose *r*" and is equivalent to the number of distinct unordered subsets of *r* elements that can be taken from a set of *n* elements. +/// The following table compares the computational complexity of the various methods and +/// operations, where: +/// +/// - **n** is the number of entities that match the query. +/// - **r** is the number of elements in a combination. +/// - **k** is the number of involved entities in the operation. +/// - **a** is the number of archetypes in the world. +/// - **C** is the [binomial coefficient], used to count combinations. nCr is +/// read as "*n* choose *r*" and is equivalent to the number of distinct unordered subsets of *r* +/// elements that can be taken from a set of *n* elements. /// /// |Query operation|Computational complexity| -/// |:---:|:---:| -/// |[`iter`]\[[`_mut`][`iter_mut`]]|O(n)| -/// |[[`iter().for_each()`][`for_each`]\[[`iter_mut().for_each()`][`for_each`]],
[`par_iter`]\[[`_mut`][`par_iter_mut`]]|O(n)| -/// |[`iter_many`]\[[`_mut`][`iter_many_mut`]]|O(k)| -/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]]|O(nCr)| -/// |[`get`]\[[`_mut`][`get_mut`]]|O(1)| -/// |([`get_`][`get_many`])[`many`]|O(k)| -/// |([`get_`][`get_many_mut`])[`many_mut`]|O(k2)| -/// |[`single`]\[[`_mut`][`single_mut`]],
[`get_single`]\[[`_mut`][`get_single_mut`]]|O(a)| -/// |Archetype based filtering ([`With`], [`Without`], [`Or`])|O(a)| +/// |-|-| +/// |[`iter`]\[[`_mut`][`iter_mut`]\]|O(n)| +/// |[`iter[_mut]().for_each()`][`for_each`],
[`par_iter`]\[[`_mut`][`par_iter_mut`]\]|O(n)| +/// |[`iter_many`]\[[`_mut`][`iter_many_mut`]\]|O(k)| +/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]\]|O(nCr)| +/// |[`single`](Self::single)\[[`_mut`][`single_mut`]\]|O(a)| +/// |[`get`]\[[`_mut`][`get_mut`]\]|O(1)| +/// |[`get_many`]|O(k)| +/// |[`get_many_mut`]|O(k2)| +/// |Archetype-based filtering ([`With`], [`Without`], [`Or`])|O(a)| /// |Change detection filtering ([`Added`], [`Changed`])|O(a + n)| /// +/// [component storage types]: crate::component::StorageType +/// [`Table`]: crate::storage::Table +/// [`SparseSet`]: crate::storage::SparseSet +/// [binomial coefficient]: https://en.wikipedia.org/wiki/Binomial_coefficient +/// [`Or`]: crate::query::Or +/// [`Added`]: crate::query::Added +/// [`Changed`]: crate::query::Changed +/// /// # `Iterator::for_each` /// -/// `for_each` methods are seen to be generally faster than directly iterating through `iter` on worlds with high archetype -/// fragmentation, and may enable additional optimizations like [autovectorization]. It is strongly advised to only use -/// [`Iterator::for_each`] if it tangibly improves performance. *Always* be sure profile or benchmark both before and -/// after the change! +/// The `for_each` methods appear to be generally faster than `for`-loops when run on worlds with +/// high archetype fragmentation, and may enable additional optimizations like [autovectorization]. It +/// is strongly advised to only use [`Iterator::for_each`] if it tangibly improves performance. +/// *Always* profile or benchmark before and after the change! /// /// ```rust /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// # query: Query<&ComponentA>, -/// # ) { -/// // This might be result in better performance... -/// query.iter().for_each(|component| { -/// // do things with the component -/// }); -/// // ...than this. Always be sure to benchmark to validate the difference! -/// for component in query.iter() { -/// // do things with the component +/// # +/// fn system(query: Query<&ComponentA>) { +/// // This may result in better performance... +/// query.iter().for_each(|component| { +/// // ... +/// }); +/// +/// // ...than this. Always benchmark to validate the difference! +/// for component in query.iter() { +/// // ... 
+/// } /// } -/// # } -/// # bevy_ecs::system::assert_system_does_not_conflict(system); +/// # +/// # bevy_ecs::system::assert_is_system(system); /// ``` /// -/// [`Component`]: crate::component::Component /// [autovectorization]: https://en.wikipedia.org/wiki/Automatic_vectorization -/// [`Added`]: crate::query::Added -/// [`AnyOf`]: crate::query::AnyOf -/// [binomial coefficient]: https://en.wikipedia.org/wiki/Binomial_coefficient -/// [`Changed`]: crate::query::Changed -/// [components]: crate::component::Component -/// [entity identifiers]: Entity -/// [`EntityRef`]: crate::world::EntityRef -/// [`for_each`]: #iterator-for-each -/// [`get`]: Self::get -/// [`get_many`]: Self::get_many -/// [`get_many_mut`]: Self::get_many_mut -/// [`get_mut`]: Self::get_mut -/// [`get_single`]: Self::get_single -/// [`get_single_mut`]: Self::get_single_mut -/// [`iter`]: Self::iter -/// [`iter_combinations`]: Self::iter_combinations -/// [`iter_combinations_mut`]: Self::iter_combinations_mut -/// [`iter_many`]: Self::iter_many -/// [`iter_many_mut`]: Self::iter_many_mut -/// [`iter_mut`]: Self::iter_mut -/// [`many`]: Self::many -/// [`many_mut`]: Self::many_mut -/// [`Or`]: crate::query::Or -/// [`par_iter`]: Self::par_iter -/// [`par_iter_mut`]: Self::par_iter_mut -/// [performance]: #performance -/// [`Single`]: Single -/// [`Option`]: Single -/// [`single`]: Self::single -/// [`single_mut`]: Self::single_mut -/// [`SparseSet`]: crate::storage::SparseSet -/// [System parameter]: crate::system::SystemParam -/// [`Table`]: crate::storage::Table -/// [`With`]: crate::query::With -/// [`Without`]: crate::query::Without pub struct Query<'world, 'state, D: QueryData, F: QueryFilter = ()> { // SAFETY: Must have access to the components registered in `state`. world: UnsafeWorldCell<'world>, @@ -439,6 +545,18 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { unsafe { self.reborrow_unsafe() }.into_readonly() } + /// Returns another `Query` from this that does not return any data, which can be faster. + fn as_nop(&self) -> Query<'_, 's, NopWorldQuery, F> { + let new_state = self.state.as_nop(); + // SAFETY: + // - The reborrowed query is converted to read-only, so it cannot perform mutable access, + // and the original query is held with a shared borrow, so it cannot perform mutable access either. + // Note that although `NopWorldQuery` itself performs *no* access and could soundly alias a mutable query, + // it has the original `QueryState::component_access` and could be `transmute`d to a read-only query. + // - The world matches because it was the same one used to construct self. + unsafe { Query::new(self.world, new_state, self.last_run, self.this_run) } + } + + /// Returns another `Query` from this that fetches the read-only version of the query items. /// /// For example, `Query<(&mut D1, &D2, &mut D3), With>` will become `Query<(&D1, &D2, &D3), With>`. @@ -498,6 +616,24 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// - [`reborrow`](Self::reborrow) for the safe versions. pub unsafe fn reborrow_unsafe(&self) -> Query<'_, 's, D, F> { + // SAFETY: + // - This is memory safe because the caller ensures that there are no conflicting references. + // - The world matches because it was the same one used to construct self. + unsafe { self.copy_unsafe() } + } + + /// Returns a new `Query` copying the access from this one. + /// The current query will still be usable while the new one exists, but must not be used in a way that violates aliasing.
+ /// + /// # Safety + /// + /// This function makes it possible to violate Rust's aliasing guarantees. + /// You must make sure this call does not result in a mutable or shared reference to a component with a mutable reference. + /// + /// # See also + /// + /// - [`reborrow_unsafe`](Self::reborrow_unsafe) for a safer version that constrains the returned `'w` lifetime to the length of the borrow. + unsafe fn copy_unsafe(&self) -> Query<'w, 's, D, F> { // SAFETY: // - This is memory safe because the caller ensures that there are no conflicting references. // - The world matches because it was the same one used to construct self. @@ -653,10 +789,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { #[inline] pub fn iter_combinations_inner(self) -> QueryCombinationIter<'w, 's, D, F, K> { // SAFETY: `self.world` has permission to access the required components. - unsafe { - self.state - .iter_combinations_unchecked_manual(self.world, self.last_run, self.this_run) - } + unsafe { QueryCombinationIter::new(self.world, self.state, self.last_run, self.this_run) } } /// Returns an [`Iterator`] over the read-only query items generated from an [`Entity`] list. @@ -697,7 +830,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`iter_many_mut`](Self::iter_many_mut) to get mutable query items. /// - [`iter_many_inner`](Self::iter_many_inner) to get mutable query items with the full `'world` lifetime. #[inline] - pub fn iter_many>( + pub fn iter_many>( &self, entities: EntityList, ) -> QueryManyIter<'_, 's, D::ReadOnly, F, EntityList::IntoIter> { @@ -742,7 +875,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`iter_many`](Self::iter_many) to get read-only query items. /// - [`iter_many_inner`](Self::iter_many_inner) to get mutable query items with the full `'world` lifetime. #[inline] - pub fn iter_many_mut>( + pub fn iter_many_mut>( &mut self, entities: EntityList, ) -> QueryManyIter<'_, 's, D, F, EntityList::IntoIter> { @@ -760,15 +893,16 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`iter_many`](Self::iter_many) to get read-only query items. /// - [`iter_many_mut`](Self::iter_many_mut) to get mutable query items. #[inline] - pub fn iter_many_inner>( + pub fn iter_many_inner>( self, entities: EntityList, ) -> QueryManyIter<'w, 's, D, F, EntityList::IntoIter> { // SAFETY: `self.world` has permission to access the required components. unsafe { - self.state.iter_many_unchecked_manual( - entities, + QueryManyIter::new( self.world, + self.state, + entities, self.last_run, self.this_run, ) @@ -822,22 +956,13 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # See also /// /// - [`iter_many_unique_mut`](Self::iter_many_unique_mut) to get mutable query items. + /// - [`iter_many_unique_inner`](Self::iter_many_unique_inner) to get with the actual "inner" world lifetime. #[inline] pub fn iter_many_unique( &self, entities: EntityList, ) -> QueryManyUniqueIter<'_, 's, D::ReadOnly, F, EntityList::IntoIter> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The query is read-only, so it can be aliased even if it was originally mutable. - unsafe { - self.state.as_readonly().iter_many_unique_unchecked_manual( - entities, - self.world, - self.last_run, - self.this_run, - ) - } + self.as_readonly().iter_many_unique_inner(entities) } /// Returns an iterator over the unique query items generated from an [`EntitySet`]. 
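The `iter_many` family touched above skips entities that do not match the query and, for the read-only variant, tolerates duplicates in the input list. A minimal, self-contained usage sketch of that behavior; the `Score` component and `Selection` resource are invented for illustration:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Score(u32);

#[derive(Resource)]
struct Selection(Vec<Entity>);

fn print_selected_scores(selection: Res<Selection>, scores: Query<&Score>) {
    // Entities in the list that lack a `Score` are skipped rather than returning
    // an error, and duplicates are fine because the items are read-only.
    for score in scores.iter_many(&selection.0) {
        println!("selected score: {}", score.0);
    }
}

fn main() {
    bevy_ecs::system::assert_is_system(print_selected_scores);
}
```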
@@ -883,16 +1008,76 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// } /// # bevy_ecs::system::assert_is_system(system); /// ``` + /// # See also + /// + /// - [`iter_many_unique`](Self::iter_many_unique) to get read-only query items. + /// - [`iter_many_unique_inner`](Self::iter_many_unique_inner) to get with the actual "inner" world lifetime. #[inline] pub fn iter_many_unique_mut( &mut self, entities: EntityList, ) -> QueryManyUniqueIter<'_, 's, D, F, EntityList::IntoIter> { + self.reborrow().iter_many_unique_inner(entities) + } + + /// Returns an iterator over the unique query items generated from an [`EntitySet`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// Items are returned in the order of the list of entities. Entities that don't match the query are skipped. + /// + /// # Examples + /// + /// ``` + /// # use bevy_ecs::{prelude::*, entity::{EntitySet, UniqueEntityIter}}; + /// # use core::slice; + /// #[derive(Component)] + /// struct Counter { + /// value: i32 + /// } + /// + /// // `Friends` ensures that it only lists unique entities. + /// #[derive(Component)] + /// struct Friends { + /// unique_list: Vec, + /// } + /// + /// impl<'a> IntoIterator for &'a Friends { + /// type Item = &'a Entity; + /// type IntoIter = UniqueEntityIter>; + /// + /// fn into_iter(self) -> Self::IntoIter { + /// // SAFETY: `Friends` ensures that it unique_list contains only unique entities. + /// unsafe { UniqueEntityIter::from_iterator_unchecked(self.unique_list.iter()) } + /// } + /// } + /// + /// fn system( + /// friends_query: Query<&Friends>, + /// mut counter_query: Query<&mut Counter>, + /// ) { + /// let friends = friends_query.single().unwrap(); + /// for mut counter in counter_query.iter_many_unique_inner(friends) { + /// println!("Friend's counter: {:?}", counter.value); + /// counter.value += 1; + /// } + /// } + /// # bevy_ecs::system::assert_is_system(system); + /// ``` + /// # See also + /// + /// - [`iter_many_unique`](Self::iter_many_unique) to get read-only query items. + /// - [`iter_many_unique_mut`](Self::iter_many_unique_mut) to get mutable query items. + #[inline] + pub fn iter_many_unique_inner( + self, + entities: EntityList, + ) -> QueryManyUniqueIter<'w, 's, D, F, EntityList::IntoIter> { // SAFETY: `self.world` has permission to access the required components. unsafe { - self.state.iter_many_unique_unchecked_manual( - entities, + QueryManyUniqueIter::new( self.world, + self.state, + entities, self.last_run, self.this_run, ) @@ -953,7 +1138,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # See also /// /// - [`iter_many_mut`](Self::iter_many_mut) to safely access the query items. - pub unsafe fn iter_many_unsafe>( + pub unsafe fn iter_many_unsafe>( &self, entities: EntityList, ) -> QueryManyIter<'_, 's, D, F, EntityList::IntoIter> { @@ -972,22 +1157,15 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`iter_many_mut`](Self::iter_many_mut) to safely access the query items. + /// - [`iter_many_unique`](Self::iter_many_unique) to get read-only query items. + /// - [`iter_many_unique_mut`](Self::iter_many_unique_mut) to get mutable query items. + /// - [`iter_many_unique_inner`](Self::iter_many_unique_inner) to get with the actual "inner" world lifetime. 
pub unsafe fn iter_many_unique_unsafe( &self, entities: EntityList, ) -> QueryManyUniqueIter<'_, 's, D, F, EntityList::IntoIter> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The caller ensures that this operation will not result in any aliased mutable accesses. - unsafe { - self.state.iter_many_unique_unchecked_manual( - entities, - self.world, - self.last_run, - self.this_run, - ) - } + // SAFETY: The caller promises that this will not result in multiple mutable references. + unsafe { self.reborrow_unsafe() }.iter_many_unique_inner(entities) } /// Returns a parallel iterator over the query results for the given [`World`]. @@ -1082,6 +1260,94 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { } } + /// Returns a parallel iterator over the read-only query items generated from an [`Entity`] list. + /// + /// Entities that don't match the query are skipped. Iteration order and thread assignment is not guaranteed. + /// + /// If the `multithreaded` feature is disabled, iterating with this operates identically to [`Iterator::for_each`] + /// on [`QueryManyIter`]. + /// + /// This can only be called for read-only queries. To avoid potential aliasing, there is no `par_iter_many_mut` equivalent. + /// See [`par_iter_many_unique_mut`] for an alternative using [`EntitySet`]. + /// + /// Note that you must use the `for_each` method to iterate over the + /// results, see [`par_iter_mut`] for an example. + /// + /// [`par_iter_many_unique_mut`]: Self::par_iter_many_unique_mut + /// [`par_iter_mut`]: Self::par_iter_mut + #[inline] + pub fn par_iter_many>( + &self, + entities: EntityList, + ) -> QueryParManyIter<'_, '_, D::ReadOnly, F, EntityList::Item> { + QueryParManyIter { + world: self.world, + state: self.state.as_readonly(), + entity_list: entities.into_iter().collect(), + last_run: self.last_run, + this_run: self.this_run, + batching_strategy: BatchingStrategy::new(), + } + } + + /// Returns a parallel iterator over the unique read-only query items generated from an [`EntitySet`]. + /// + /// Entities that don't match the query are skipped. Iteration order and thread assignment is not guaranteed. + /// + /// If the `multithreaded` feature is disabled, iterating with this operates identically to [`Iterator::for_each`] + /// on [`QueryManyUniqueIter`]. + /// + /// This can only be called for read-only queries, see [`par_iter_many_unique_mut`] for write-queries. + /// + /// Note that you must use the `for_each` method to iterate over the + /// results, see [`par_iter_mut`] for an example. + /// + /// [`par_iter_many_unique_mut`]: Self::par_iter_many_unique_mut + /// [`par_iter_mut`]: Self::par_iter_mut + #[inline] + pub fn par_iter_many_unique>( + &self, + entities: EntityList, + ) -> QueryParManyUniqueIter<'_, '_, D::ReadOnly, F, EntityList::Item> { + QueryParManyUniqueIter { + world: self.world, + state: self.state.as_readonly(), + entity_list: entities.into_iter().collect(), + last_run: self.last_run, + this_run: self.this_run, + batching_strategy: BatchingStrategy::new(), + } + } + + /// Returns a parallel iterator over the unique query items generated from an [`EntitySet`]. + /// + /// Entities that don't match the query are skipped. Iteration order and thread assignment is not guaranteed. + /// + /// If the `multithreaded` feature is disabled, iterating with this operates identically to [`Iterator::for_each`] + /// on [`QueryManyUniqueIter`]. 
+ /// + /// This can only be called for mutable queries, see [`par_iter_many_unique`] for read-only-queries. + /// + /// Note that you must use the `for_each` method to iterate over the + /// results, see [`par_iter_mut`] for an example. + /// + /// [`par_iter_many_unique`]: Self::par_iter_many_unique + /// [`par_iter_mut`]: Self::par_iter_mut + #[inline] + pub fn par_iter_many_unique_mut>( + &mut self, + entities: EntityList, + ) -> QueryParManyUniqueIter<'_, '_, D, F, EntityList::Item> { + QueryParManyUniqueIter { + world: self.world, + state: self.state, + entity_list: entities.into_iter().collect(), + last_run: self.last_run, + this_run: self.this_run, + batching_strategy: BatchingStrategy::new(), + } + } + /// Returns the read-only query item for the given [`Entity`]. /// /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. @@ -1126,18 +1392,103 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. /// The elements of the array do not need to be unique, unlike `get_many_mut`. /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// use bevy_ecs::query::QueryEntityError; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// let entity_vec: Vec = (0..3).map(|i| world.spawn(A(i)).id()).collect(); + /// let entities: [Entity; 3] = entity_vec.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&A>(); + /// let query = query_state.query(&world); + /// + /// let component_values = query.get_many(entities).unwrap(); + /// + /// assert_eq!(component_values, [&A(0), &A(1), &A(2)]); + /// + /// let wrong_entity = Entity::from_raw(365); + /// + /// assert_eq!( + /// match query.get_many([wrong_entity]).unwrap_err() { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// ``` + /// /// # See also /// /// - [`get_many_mut`](Self::get_many_mut) to get mutable query items. + /// - [`get_many_unique`](Self::get_many_unique) to only handle unique inputs. /// - [`many`](Self::many) for the panicking version. #[inline] pub fn get_many( &self, entities: [Entity; N], ) -> Result<[ROQueryItem<'_, D>; N], QueryEntityError> { + // Note that we call a separate `*_inner` method from `get_many_mut` + // because we don't need to check for duplicates. self.as_readonly().get_many_inner(entities) } + /// Returns the read-only query items for the given [`UniqueEntityArray`]. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. 
+ /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&A>(); + /// let query = query_state.query(&world); + /// + /// let component_values = query.get_many_unique(entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(0), &A(1), &A(2)]); + /// + /// let wrong_entity = Entity::from_raw(365); + /// + /// assert_eq!( + /// match query.get_many_unique(UniqueEntityArray::from([wrong_entity])).unwrap_err() { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// ``` + /// + /// # See also + /// + /// - [`get_many_unique_mut`](Self::get_many_mut) to get mutable query items. + /// - [`get_many`](Self::get_many) to handle inputs with duplicates. + #[inline] + pub fn get_many_unique( + &self, + entities: UniqueEntityArray, + ) -> Result<[ROQueryItem<'_, D>; N], QueryEntityError> { + self.as_readonly().get_many_unique_inner(entities) + } + /// Returns the read-only query items for the given array of [`Entity`]. /// /// # Panics @@ -1181,6 +1532,10 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`get_many`](Self::get_many) for the non-panicking version. #[inline] #[track_caller] + #[deprecated( + since = "0.16.0", + note = "Use `get_many` instead and handle the Result." + )] pub fn many(&self, entities: [Entity; N]) -> [ROQueryItem<'_, D>; N] { match self.get_many(entities) { Ok(items) => items, @@ -1233,12 +1588,60 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// - [`get_mut`](Self::get_mut) to get the item using a mutable borrow of the [`Query`]. #[inline] - pub fn get_inner(self, entity: Entity) -> Result, QueryEntityError<'w>> { + pub fn get_inner(self, entity: Entity) -> Result, QueryEntityError> { // SAFETY: system runs without conflicts with other systems. 
// same-system queries have runtime borrow checks when they conflict unsafe { - self.state - .get_unchecked_manual(self.world, entity, self.last_run, self.this_run) + let location = self + .world + .entities() + .get(entity) + .ok_or(EntityDoesNotExistError::new(entity, self.world.entities()))?; + if !self + .state + .matched_archetypes + .contains(location.archetype_id.index()) + { + return Err(QueryEntityError::QueryDoesNotMatch( + entity, + location.archetype_id, + )); + } + let archetype = self + .world + .archetypes() + .get(location.archetype_id) + .debug_checked_unwrap(); + let mut fetch = D::init_fetch( + self.world, + &self.state.fetch_state, + self.last_run, + self.this_run, + ); + let mut filter = F::init_fetch( + self.world, + &self.state.filter_state, + self.last_run, + self.this_run, + ); + + let table = self + .world + .storages() + .tables + .get(location.table_id) + .debug_checked_unwrap(); + D::set_archetype(&mut fetch, &self.state.fetch_state, archetype, table); + F::set_archetype(&mut filter, &self.state.filter_state, archetype, table); + + if F::filter_fetch(&mut filter, entity, location.table_row) { + Ok(D::fetch(&mut fetch, entity, location.table_row)) + } else { + Err(QueryEntityError::QueryDoesNotMatch( + entity, + location.archetype_id, + )) + } } } @@ -1247,16 +1650,143 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// The returned query items are in the same order as the input. /// In case of a nonexisting entity, duplicate entities or mismatched component, a [`QueryEntityError`] is returned instead. /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// use bevy_ecs::query::QueryEntityError; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// + /// let entities: Vec = (0..3).map(|i| world.spawn(A(i)).id()).collect(); + /// let entities: [Entity; 3] = entities.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// let wrong_entity = Entity::from_raw(57); + /// let invalid_entity = world.spawn_empty().id(); + /// + /// + /// let mut query_state = world.query::<&mut A>(); + /// let mut query = query_state.query_mut(&mut world); + /// + /// let mut mutable_component_values = query.get_many_mut(entities).unwrap(); + /// + /// for mut a in &mut mutable_component_values { + /// a.0 += 5; + /// } + /// + /// let component_values = query.get_many(entities).unwrap(); + /// + /// assert_eq!(component_values, [&A(5), &A(6), &A(7)]); + /// + /// assert_eq!( + /// match query + /// .get_many_mut([wrong_entity]) + /// .unwrap_err() + /// { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// assert_eq!( + /// match query + /// .get_many_mut([invalid_entity]) + /// .unwrap_err() + /// { + /// QueryEntityError::QueryDoesNotMatch(entity, _) => entity, + /// _ => panic!(), + /// }, + /// invalid_entity + /// ); + /// assert_eq!( + /// query + /// .get_many_mut([entities[0], entities[0]]) + /// .unwrap_err(), + /// QueryEntityError::AliasedMutability(entities[0]) + /// ); + /// ``` /// # See also /// - /// - [`get_many`](Self::get_many) to get read-only query items. + /// - [`get_many`](Self::get_many) to get read-only query items without checking for duplicate entities. /// - [`many_mut`](Self::many_mut) for the panicking version. 
#[inline] pub fn get_many_mut( &mut self, entities: [Entity; N], ) -> Result<[D::Item<'_>; N], QueryEntityError> { - self.reborrow().get_many_inner(entities) + self.reborrow().get_many_mut_inner(entities) + } + + /// Returns the query items for the given [`UniqueEntityArray`]. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. + /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// let wrong_entity = Entity::from_raw(57); + /// let invalid_entity = world.spawn_empty().id(); + /// + /// + /// let mut query_state = world.query::<&mut A>(); + /// let mut query = query_state.query_mut(&mut world); + /// + /// let mut mutable_component_values = query.get_many_unique_mut(entity_set).unwrap(); + /// + /// for mut a in &mut mutable_component_values { + /// a.0 += 5; + /// } + /// + /// let component_values = query.get_many_unique(entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(5), &A(6), &A(7)]); + /// + /// assert_eq!( + /// match query + /// .get_many_unique_mut(UniqueEntityArray::from([wrong_entity])) + /// .unwrap_err() + /// { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// assert_eq!( + /// match query + /// .get_many_unique_mut(UniqueEntityArray::from([invalid_entity])) + /// .unwrap_err() + /// { + /// QueryEntityError::QueryDoesNotMatch(entity, _) => entity, + /// _ => panic!(), + /// }, + /// invalid_entity + /// ); + /// ``` + /// # See also + /// + /// - [`get_many_unique`](Self::get_many) to get read-only query items. + #[inline] + pub fn get_many_unique_mut( + &mut self, + entities: UniqueEntityArray, + ) -> Result<[D::Item<'_>; N], QueryEntityError> { + self.reborrow().get_many_unique_inner(entities) } /// Returns the query items for the given array of [`Entity`]. @@ -1267,18 +1797,89 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get_many`](Self::get_many) to get read-only query items. + /// - [`get_many`](Self::get_many) to get read-only query items without checking for duplicate entities. /// - [`get_many_mut`](Self::get_many_mut) to get items using a mutable reference. + /// - [`get_many_inner`](Self::get_many_mut_inner) to get read-only query items with the actual "inner" world lifetime. + #[inline] + pub fn get_many_mut_inner( + self, + entities: [Entity; N], + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + // Verify that all entities are unique + for i in 0..N { + for j in 0..i { + if entities[i] == entities[j] { + return Err(QueryEntityError::AliasedMutability(entities[i])); + } + } + } + // SAFETY: All entities are unique, so the results don't alias. + unsafe { self.get_many_impl(entities) } + } + + /// Returns the query items for the given array of [`Entity`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// The returned query items are in the same order as the input. 
+ /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. + /// + /// # See also + /// + /// - [`get_many`](Self::get_many) to get read-only query items without checking for duplicate entities. + /// - [`get_many_mut`](Self::get_many_mut) to get items using a mutable reference. + /// - [`get_many_mut_inner`](Self::get_many_mut_inner) to get mutable query items with the actual "inner" world lifetime. #[inline] pub fn get_many_inner( self, entities: [Entity; N], - ) -> Result<[D::Item<'w>; N], QueryEntityError<'w>> { - // SAFETY: scheduler ensures safe Query world access - unsafe { - self.state - .get_many_unchecked_manual(self.world, entities, self.last_run, self.this_run) + ) -> Result<[D::Item<'w>; N], QueryEntityError> + where + D: ReadOnlyQueryData, + { + // SAFETY: The query results are read-only, so they don't conflict if there are duplicate entities. + unsafe { self.get_many_impl(entities) } + } + + /// Returns the query items for the given [`UniqueEntityArray`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity, duplicate entities or mismatched component, a [`QueryEntityError`] is returned instead. + /// + /// # See also + /// + /// - [`get_many_unique`](Self::get_many_unique) to get read-only query items without checking for duplicate entities. + /// - [`get_many_unique_mut`](Self::get_many_unique_mut) to get items using a mutable reference. + #[inline] + pub fn get_many_unique_inner( + self, + entities: UniqueEntityArray, + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + // SAFETY: All entities are unique, so the results don't alias. + unsafe { self.get_many_impl(entities.into_inner()) } + } + + /// Returns the query items for the given array of [`Entity`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// # Safety + /// + /// The caller must ensure that the query data returned for the entities does not conflict, + /// either because they are all unique or because the data is read-only. + unsafe fn get_many_impl( + self, + entities: [Entity; N], + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + let mut values = [(); N].map(|_| MaybeUninit::uninit()); + + for (value, entity) in core::iter::zip(&mut values, entities) { + // SAFETY: The caller asserts that the results don't alias + let item = unsafe { self.copy_unsafe() }.get_inner(entity)?; + *value = MaybeUninit::new(item); } + + // SAFETY: Each value has been fully initialized. + Ok(values.map(|x| unsafe { x.assume_init() })) } /// Returns the query items for the given array of [`Entity`]. @@ -1331,6 +1932,10 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`many`](Self::many) to get read-only query items. #[inline] #[track_caller] + #[deprecated( + since = "0.16.0", + note = "Use `get_many_mut` instead and handle the Result." + )] pub fn many_mut(&mut self, entities: [Entity; N]) -> [D::Item<'_>; N] { match self.get_many_mut(entities) { Ok(items) => items, @@ -1358,36 +1963,6 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { unsafe { self.reborrow_unsafe() }.get_inner(entity) } - /// Returns a single read-only query item when there is exactly one entity matching the query. - /// - /// # Panics - /// - /// This method panics if the number of query items is **not** exactly one. 
- /// - /// # Example - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # #[derive(Component)] - /// # struct Player; - /// # #[derive(Component)] - /// # struct Position(f32, f32); - /// fn player_system(query: Query<&Position, With>) { - /// let player_position = query.single(); - /// // do something with player_position - /// } - /// # bevy_ecs::system::assert_is_system(player_system); - /// ``` - /// - /// # See also - /// - /// - [`get_single`](Self::get_single) for the non-panicking version. - /// - [`single_mut`](Self::single_mut) to get the mutable query item. - #[track_caller] - pub fn single(&self) -> ROQueryItem<'_, D> { - self.get_single().unwrap() - } - /// Returns a single read-only query item when there is exactly one entity matching the query. /// /// If the number of query items is not exactly one, a [`QuerySingleError`] is returned instead. @@ -1400,7 +1975,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # #[derive(Component)] /// # struct PlayerScore(i32); /// fn player_scoring_system(query: Query<&PlayerScore>) { - /// match query.get_single() { + /// match query.single() { /// Ok(PlayerScore(score)) => { /// println!("Score: {}", score); /// } @@ -1417,43 +1992,16 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get_single_mut`](Self::get_single_mut) to get the mutable query item. - /// - [`single`](Self::single) for the panicking version. + /// - [`single_mut`](Self::single_mut) to get the mutable query item. #[inline] - pub fn get_single(&self) -> Result, QuerySingleError> { - self.as_readonly().get_single_inner() + pub fn single(&self) -> Result, QuerySingleError> { + self.as_readonly().single_inner() } - /// Returns a single query item when there is exactly one entity matching the query. - /// - /// # Panics - /// - /// This method panics if the number of query items is **not** exactly one. - /// - /// # Example - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Component)] - /// # struct Player; - /// # #[derive(Component)] - /// # struct Health(u32); - /// # - /// fn regenerate_player_health_system(mut query: Query<&mut Health, With>) { - /// let mut health = query.single_mut(); - /// health.0 += 1; - /// } - /// # bevy_ecs::system::assert_is_system(regenerate_player_health_system); - /// ``` - /// - /// # See also - /// - /// - [`get_single_mut`](Self::get_single_mut) for the non-panicking version. - /// - [`single`](Self::single) to get the read-only query item. - #[track_caller] - pub fn single_mut(&mut self) -> D::Item<'_> { - self.get_single_mut().unwrap() + /// A deprecated alias for [`single`](Self::single). + #[deprecated(since = "0.16.0", note = "Please use `single` instead")] + pub fn get_single(&self) -> Result, QuerySingleError> { + self.single() } /// Returns a single query item when there is exactly one entity matching the query. 
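To recap the change above: `single` and `single_mut` now return a `Result` instead of panicking, and `get_single`/`get_single_mut` survive only as deprecated aliases. A migration sketch, with the component and system names invented for illustration:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Player;

#[derive(Component)]
struct Health(u32);

fn regenerate_health(mut query: Query<&mut Health, With<Player>>) {
    // Before this change, `single_mut` panicked unless exactly one entity matched.
    // Now it returns a `Result`, so handle the error case, e.g. by returning early.
    let Ok(mut health) = query.single_mut() else {
        return;
    };
    health.0 += 1;
}

fn main() {
    bevy_ecs::system::assert_is_system(regenerate_health);
}
```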
@@ -1471,7 +2019,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # struct Health(u32); /// # /// fn regenerate_player_health_system(mut query: Query<&mut Health, With>) { - /// let mut health = query.get_single_mut().expect("Error: Could not find a single player."); + /// let mut health = query.single_mut().expect("Error: Could not find a single player."); /// health.0 += 1; /// } /// # bevy_ecs::system::assert_is_system(regenerate_player_health_system); @@ -1479,11 +2027,16 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get_single`](Self::get_single) to get the read-only query item. - /// - [`single_mut`](Self::single_mut) for the panicking version. + /// - [`single`](Self::single) to get the read-only query item. #[inline] + pub fn single_mut(&mut self) -> Result, QuerySingleError> { + self.reborrow().single_inner() + } + + /// A deprecated alias for [`single_mut`](Self::single_mut). + #[deprecated(since = "0.16.0", note = "Please use `single_mut` instead")] pub fn get_single_mut(&mut self) -> Result, QuerySingleError> { - self.reborrow().get_single_inner() + self.single_mut() } /// Returns a single query item when there is exactly one entity matching the query. @@ -1502,7 +2055,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # struct Health(u32); /// # /// fn regenerate_player_health_system(query: Query<&mut Health, With>) { - /// let mut health = query.get_single_inner().expect("Error: Could not find a single player."); + /// let mut health = query.single_inner().expect("Error: Could not find a single player."); /// health.0 += 1; /// } /// # bevy_ecs::system::assert_is_system(regenerate_player_health_system); @@ -1510,16 +2063,21 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get_single`](Self::get_single) to get the read-only query item. - /// - [`get_single_mut`](Self::get_single_mut) to get the mutable query item. + /// - [`single`](Self::single) to get the read-only query item. + /// - [`single_mut`](Self::single_mut) to get the mutable query item. + /// - [`single_inner`](Self::single_inner) for the panicking version. #[inline] - pub fn get_single_inner(self) -> Result, QuerySingleError> { - // SAFETY: - // the query ensures mutable access to the components it accesses, and the query - // is uniquely borrowed - unsafe { - self.state - .get_single_unchecked_manual(self.world, self.last_run, self.this_run) + pub fn single_inner(self) -> Result, QuerySingleError> { + let mut query = self.into_iter(); + let first = query.next(); + let extra = query.next().is_some(); + + match (first, extra) { + (Some(r), false) => Ok(r), + (None, _) => Err(QuerySingleError::NoEntities(core::any::type_name::())), + (Some(_), _) => Err(QuerySingleError::MultipleEntities(core::any::type_name::< + Self, + >())), } } @@ -1553,14 +2111,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// [`Changed`]: crate::query::Changed #[inline] pub fn is_empty(&self) -> bool { - // SAFETY: - // - `self.world` has permission to read any data required by the WorldQuery. - // - `&self` ensures that no one currently has write access. - // - `self.world` matches `self.state`. - unsafe { - self.state - .is_empty_unsafe_world_cell(self.world, self.last_run, self.this_run) - } + self.as_nop().iter().next().is_none() } /// Returns `true` if the given [`Entity`] matches the query. 
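The new `single_inner` body above implements the usual "exactly one" check: pull the first item, then probe for a second, and map the three outcomes onto `Ok`, `NoEntities`, and `MultipleEntities`. The same logic as a free-standing sketch over any iterator:

```rust
/// Returns `Ok(item)` only if the iterator yields exactly one item,
/// mirroring the first/extra probe used by `single_inner` above.
fn exactly_one<I: IntoIterator>(iter: I) -> Result<I::Item, &'static str> {
    let mut iter = iter.into_iter();
    match (iter.next(), iter.next()) {
        (Some(item), None) => Ok(item),
        (None, _) => Err("no items"),
        (Some(_), Some(_)) => Err("multiple items"),
    }
}

fn main() {
    assert_eq!(exactly_one([7]), Ok(7));
    assert_eq!(exactly_one::<[i32; 0]>([]), Err("no items"));
    assert_eq!(exactly_one([1, 2]), Err("multiple items"));
}
```

The neighboring `is_empty` rewrite takes a similar route, reusing ordinary iteration over a no-op fetch (`self.as_nop().iter().next().is_none()`) instead of a bespoke unchecked call; the `contains` hunk that follows applies the same idea.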
@@ -1589,13 +2140,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// ``` #[inline] pub fn contains(&self, entity: Entity) -> bool { - // SAFETY: NopFetch does not access any members while &self ensures no one has exclusive access - unsafe { - self.state - .as_nop() - .get_unchecked_manual(self.world, entity, self.last_run, self.this_run) - .is_ok() - } + self.as_nop().get(entity).is_ok() } /// Returns a [`QueryLens`] that can be used to get a query with a more general fetch. @@ -1627,7 +2172,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # world.spawn((A(10), B(5))); /// # /// fn reusable_function(lens: &mut QueryLens<&A>) { - /// assert_eq!(lens.query().single().0, 10); + /// assert_eq!(lens.query().single().unwrap().0, 10); /// } /// /// // We can use the function in a system that takes the exact query. @@ -1786,7 +2331,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # world.spawn((A(10), B(5))); /// # /// fn reusable_function(mut lens: QueryLens<&A>) { - /// assert_eq!(lens.query().single().0, 10); + /// assert_eq!(lens.query().single().unwrap().0, 10); /// } /// /// // We can use the function in a system that takes the exact query. @@ -1935,10 +2480,10 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// Like `transmute_lens` the query terms can be changed with some restrictions. /// See [`Self::transmute_lens`] for more details. - pub fn join( - &mut self, - other: &mut Query, - ) -> QueryLens<'_, NewD> { + pub fn join<'a, OtherD: QueryData, NewD: QueryData>( + &'a mut self, + other: &'a mut Query, + ) -> QueryLens<'a, NewD> { self.join_filtered(other) } @@ -1964,7 +2509,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`join`](Self::join) to join using a mutable borrow of the [`Query`]. pub fn join_inner( self, - other: &mut Query, + other: Query<'w, '_, OtherD>, ) -> QueryLens<'w, NewD> { self.join_filtered_inner(other) } @@ -1977,15 +2522,16 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// terms like `Added` and `Changed` will only be respected if they are in /// the type signature. pub fn join_filtered< + 'a, OtherD: QueryData, OtherF: QueryFilter, NewD: QueryData, NewF: QueryFilter, >( - &mut self, - other: &mut Query, - ) -> QueryLens<'_, NewD, NewF> { - self.reborrow().join_filtered_inner(other) + &'a mut self, + other: &'a mut Query, + ) -> QueryLens<'a, NewD, NewF> { + self.reborrow().join_filtered_inner(other.reborrow()) } /// Equivalent to [`Self::join_inner`] but also includes a [`QueryFilter`] type. @@ -2007,7 +2553,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { NewF: QueryFilter, >( self, - other: &mut Query, + other: Query<'w, '_, OtherD, OtherF>, ) -> QueryLens<'w, NewD, NewF> { let state = self .state @@ -2030,10 +2576,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> IntoIterator for Query<'w, 's, D, F> // - `self.world` has permission to access the required components. // - We consume the query, so mutable queries cannot alias. // Read-only queries are `Copy`, but may alias themselves. - unsafe { - self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) - } + unsafe { QueryIter::new(self.world, self.state, self.last_run, self.this_run) } } } @@ -2097,7 +2640,21 @@ pub struct QueryLens<'w, Q: QueryData, F: QueryFilter = ()> { impl<'w, Q: QueryData, F: QueryFilter> QueryLens<'w, Q, F> { /// Create a [`Query`] from the underlying [`QueryState`]. 
- pub fn query(&mut self) -> Query<'w, '_, Q, F> { + pub fn query(&mut self) -> Query<'_, '_, Q, F> { + Query { + world: self.world, + state: &self.state, + last_run: self.last_run, + this_run: self.this_run, + } + } +} + +impl<'w, Q: ReadOnlyQueryData, F: QueryFilter> QueryLens<'w, Q, F> { + /// Create a [`Query`] from the underlying [`QueryState`]. + /// This returns results with the actual "inner" world lifetime, + /// so it may only be used with read-only queries to prevent mutable aliasing. + pub fn query_inner(&self) -> Query<'w, '_, Q, F> { Query { world: self.world, state: &self.state, @@ -2108,9 +2665,9 @@ impl<'w, Q: QueryData, F: QueryFilter> QueryLens<'w, Q, F> { } impl<'w, 's, Q: QueryData, F: QueryFilter> From<&'s mut QueryLens<'w, Q, F>> - for Query<'w, 's, Q, F> + for Query<'s, 's, Q, F> { - fn from(value: &'s mut QueryLens<'w, Q, F>) -> Query<'w, 's, Q, F> { + fn from(value: &'s mut QueryLens<'w, Q, F>) -> Query<'s, 's, Q, F> { value.query() } } @@ -2126,7 +2683,7 @@ impl<'w, 'q, Q: QueryData, F: QueryFilter> From<&'q mut Query<'w, '_, Q, F>> /// [System parameter] that provides access to single entity's components, much like [`Query::single`]/[`Query::single_mut`]. /// /// This [`SystemParam`](crate::system::SystemParam) fails validation if zero or more than one matching entity exists. -/// This will cause a panic, but can be configured to do nothing or warn once. +/// This will cause the system to be skipped, according to the rules laid out in [`SystemParamValidationError`](crate::system::SystemParamValidationError). /// /// Use [`Option>`] instead if zero or one matching entities can exist. /// @@ -2162,7 +2719,7 @@ impl<'w, D: QueryData, F: QueryFilter> Single<'w, D, F> { /// [System parameter] that works very much like [`Query`] except it always contains at least one matching entity. /// /// This [`SystemParam`](crate::system::SystemParam) fails validation if no matching entities exist. -/// This will cause a panic, but can be configured to do nothing or warn once. +/// This will cause the system to be skipped, according to the rules laid out in [`SystemParamValidationError`](crate::system::SystemParamValidationError). /// /// Much like [`Query::is_empty`] the worst case runtime will be `O(n)` where `n` is the number of *potential* matches. /// This can be notably expensive for queries that rely on non-archetypal filters such as [`Added`](crate::query::Added) or [`Changed`](crate::query::Changed) @@ -2193,3 +2750,51 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Populated<'w, 's, D, F> { self.0 } } + +#[cfg(test)] +mod tests { + use crate::{prelude::*, query::QueryEntityError}; + use alloc::vec::Vec; + + #[test] + fn get_many_uniqueness() { + let mut world = World::new(); + + let entities: Vec = (0..10).map(|_| world.spawn_empty().id()).collect(); + + let mut query_state = world.query::(); + + // It's best to test get_many_mut_inner directly, as it is shared + // We don't care about aliased mutability for the read-only equivalent + + // SAFETY: Query does not access world data. 
+ assert!(query_state + .query_mut(&mut world) + .get_many_mut_inner::<10>(entities.clone().try_into().unwrap()) + .is_ok()); + + assert_eq!( + query_state + .query_mut(&mut world) + .get_many_mut_inner([entities[0], entities[0]]) + .unwrap_err(), + QueryEntityError::AliasedMutability(entities[0]) + ); + + assert_eq!( + query_state + .query_mut(&mut world) + .get_many_mut_inner([entities[0], entities[1], entities[0]]) + .unwrap_err(), + QueryEntityError::AliasedMutability(entities[0]) + ); + + assert_eq!( + query_state + .query_mut(&mut world) + .get_many_mut_inner([entities[9], entities[9]]) + .unwrap_err(), + QueryEntityError::AliasedMutability(entities[9]) + ); + } +} diff --git a/crates/bevy_ecs/src/system/schedule_system.rs b/crates/bevy_ecs/src/system/schedule_system.rs index e0005f06f4..75fad2b7e9 100644 --- a/crates/bevy_ecs/src/system/schedule_system.rs +++ b/crates/bevy_ecs/src/system/schedule_system.rs @@ -3,25 +3,25 @@ use alloc::{borrow::Cow, vec::Vec}; use crate::{ archetype::ArchetypeComponentId, component::{ComponentId, Tick}, + error::Result, query::Access, - result::Result, system::{input::SystemIn, BoxedSystem, System}, world::{unsafe_world_cell::UnsafeWorldCell, DeferredWorld, World}, }; -use super::IntoSystem; +use super::{IntoSystem, SystemParamValidationError}; /// A wrapper system to change a system that returns `()` to return `Ok(())` to make it into a [`ScheduleSystem`] -pub struct InfallibleSystemWrapper>(S); +pub struct InfallibleSystemWrapper>(S); -impl> InfallibleSystemWrapper { +impl> InfallibleSystemWrapper { /// Create a new `OkWrapperSystem` pub fn new(system: S) -> Self { Self(IntoSystem::into_system(system)) } } -impl> System for InfallibleSystemWrapper { +impl> System for InfallibleSystemWrapper { type In = (); type Out = Result; @@ -65,12 +65,6 @@ impl> System for InfallibleSystemWrapper { Ok(()) } - #[inline] - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { - self.0.run(input, world); - Ok(()) - } - #[inline] fn apply_deferred(&mut self, world: &mut World) { self.0.apply_deferred(world); @@ -82,7 +76,10 @@ impl> System for InfallibleSystemWrapper { } #[inline] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { self.0.validate_param_unsafe(world) } diff --git a/crates/bevy_ecs/src/system/system.rs b/crates/bevy_ecs/src/system/system.rs index d0990e907f..69f5ae980a 100644 --- a/crates/bevy_ecs/src/system/system.rs +++ b/crates/bevy_ecs/src/system/system.rs @@ -18,7 +18,7 @@ use crate::{ use alloc::{borrow::Cow, boxed::Box, vec::Vec}; use core::any::TypeId; -use super::IntoSystem; +use super::{IntoSystem, SystemParamValidationError}; /// An ECS system that can be added to a [`Schedule`](crate::schedule::Schedule) /// @@ -30,7 +30,7 @@ use super::IntoSystem; /// /// Systems are executed in parallel, in opportunistic order; data access is managed automatically. /// It's possible to specify explicit execution order between specific systems, -/// see [`IntoSystemConfigs`](crate::schedule::IntoSystemConfigs). +/// see [`IntoScheduleConfigs`](crate::schedule::IntoScheduleConfigs). #[diagnostic::on_unimplemented(message = "`{Self}` is not a system", label = "invalid system")] pub trait System: Send + Sync + 'static { /// The system's input. 
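The `get_many_uniqueness` test above exercises the pairwise duplicate check that `get_many_mut_inner` performs before handing out mutable items. That check is just an O(N²) scan over the requested entities, which is cheap for the small fixed-size arrays this API takes. A standalone sketch of the scan, written as an illustrative free function rather than the actual method:

```rust
/// Returns the first duplicated value, if any, using the same pairwise scan
/// that `get_many_mut_inner` uses to reject aliased mutable access.
fn find_duplicate<T: PartialEq + Copy, const N: usize>(items: [T; N]) -> Option<T> {
    for i in 0..N {
        for j in 0..i {
            if items[i] == items[j] {
                return Some(items[i]);
            }
        }
    }
    None
}

fn main() {
    assert_eq!(find_duplicate([1, 2, 3]), None);
    assert_eq!(find_duplicate([1, 2, 1]), Some(1));
}
```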
@@ -69,6 +69,8 @@ pub trait System: Send + Sync + 'static { /// - The caller must ensure that [`world`](UnsafeWorldCell) has permission to access any world data /// registered in `archetype_component_access`. There must be no conflicting /// simultaneous accesses while the system is running. + /// - If [`System::is_exclusive`] returns `true`, then it must be valid to call + /// [`UnsafeWorldCell::world_mut`] on `world`. /// - The method [`System::update_archetype_component_access`] must be called at some /// point before this one, with the same exact [`World`]. If [`System::update_archetype_component_access`] /// panics (or otherwise does not return for any reason), this method must not be called. @@ -83,14 +85,25 @@ pub trait System: Send + Sync + 'static { /// /// [`run_readonly`]: ReadOnlySystem::run_readonly fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { + let ret = self.run_without_applying_deferred(input, world); + self.apply_deferred(world); + ret + } + + /// Runs the system with the given input in the world. + /// + /// [`run_readonly`]: ReadOnlySystem::run_readonly + fn run_without_applying_deferred( + &mut self, + input: SystemIn<'_, Self>, + world: &mut World, + ) -> Self::Out { let world_cell = world.as_unsafe_world_cell(); self.update_archetype_component_access(world_cell); // SAFETY: // - We have exclusive access to the entire world. // - `update_archetype_component_access` has been called. - let ret = unsafe { self.run_unsafe(input, world_cell) }; - self.apply_deferred(world); - ret + unsafe { self.run_unsafe(input, world_cell) } } /// Applies any [`Deferred`](crate::system::Deferred) system parameters (or other system buffers) of this system to the world. @@ -121,11 +134,14 @@ pub trait System: Send + Sync + 'static { /// - The method [`System::update_archetype_component_access`] must be called at some /// point before this one, with the same exact [`World`]. If [`System::update_archetype_component_access`] /// panics (or otherwise does not return for any reason), this method must not be called. - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool; + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError>; /// Safe version of [`System::validate_param_unsafe`]. /// that runs on exclusive, single-threaded `world` pointer. - fn validate_param(&mut self, world: &World) -> bool { + fn validate_param(&mut self, world: &World) -> Result<(), SystemParamValidationError> { let world_cell = world.as_unsafe_world_cell_readonly(); self.update_archetype_component_access(world_cell); // SAFETY: @@ -139,7 +155,7 @@ pub trait System: Send + Sync + 'static { /// Update the system's archetype component [`Access`]. /// - /// ## Note for implementors + /// ## Note for implementers /// `world` may only be used to access metadata. This can be done in safe code /// via functions such as [`UnsafeWorldCell::archetypes`]. fn update_archetype_component_access(&mut self, world: UnsafeWorldCell); @@ -352,36 +368,35 @@ impl RunSystemOnce for &mut World { { let mut system: T::System = IntoSystem::into_system(system); system.initialize(self); - if system.validate_param(self) { - Ok(system.run(input, self)) - } else { - Err(RunSystemError::InvalidParams(system.name())) - } + system + .validate_param(self) + .map_err(|err| RunSystemError::InvalidParams { + system: system.name(), + err, + })?; + Ok(system.run(input, self)) } } /// Running system failed. 
-#[derive(Error)] +#[derive(Error, Debug)] pub enum RunSystemError { /// System could not be run due to parameters that failed validation. - /// - /// This can occur because the data required by the system was not present in the world. - #[error("The data required by the system {0:?} was not found in the world and the system did not run due to failed parameter validation.")] - InvalidParams(Cow<'static, str>), -} - -impl Debug for RunSystemError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - Self::InvalidParams(arg0) => f.debug_tuple("InvalidParams").field(arg0).finish(), - } - } + /// This should not be considered an error if [`field@SystemParamValidationError::skipped`] is `true`. + #[error("System {system} did not run due to failed parameter validation: {err}")] + InvalidParams { + /// The identifier of the system that was run. + system: Cow<'static, str>, + /// The returned parameter validation error. + err: SystemParamValidationError, + }, } #[cfg(test)] mod tests { use super::*; use crate::prelude::*; + use alloc::string::ToString; #[test] fn run_system_once() { @@ -451,8 +466,10 @@ mod tests { let mut world = World::default(); // This fails because `T` has not been added to the world yet. - let result = world.run_system_once(system.warn_param_missing()); + let result = world.run_system_once(system); - assert!(matches!(result, Err(RunSystemError::InvalidParams(_)))); + assert!(matches!(result, Err(RunSystemError::InvalidParams { .. }))); + let expected = "System bevy_ecs::system::system::tests::run_system_once_invalid_params::system did not run due to failed parameter validation: Parameter `Res` failed validation: Resource does not exist"; + assert_eq!(expected, result.unwrap_err().to_string()); } } diff --git a/crates/bevy_ecs/src/system/system_param.rs b/crates/bevy_ecs/src/system/system_param.rs index 3a464e82a1..99d4c72df6 100644 --- a/crates/bevy_ecs/src/system/system_param.rs +++ b/crates/bevy_ecs/src/system/system_param.rs @@ -2,7 +2,7 @@ pub use crate::change_detection::{NonSendMut, Res, ResMut}; use crate::{ archetype::{Archetype, Archetypes}, bundle::Bundles, - change_detection::{Ticks, TicksMut}, + change_detection::{MaybeLocation, Ticks, TicksMut}, component::{ComponentId, ComponentTicks, Components, Tick}, entity::Entities, query::{ @@ -17,19 +17,23 @@ use crate::{ FromWorld, World, }, }; -use alloc::{borrow::ToOwned, boxed::Box, vec::Vec}; +use alloc::{ + borrow::{Cow, ToOwned}, + boxed::Box, + vec::Vec, +}; pub use bevy_ecs_macros::SystemParam; use bevy_ptr::UnsafeCellDeref; use bevy_utils::synccell::SyncCell; -#[cfg(feature = "track_location")] -use core::panic::Location; use core::{ any::Any, - fmt::Debug, + fmt::{Debug, Display}, marker::PhantomData, ops::{Deref, DerefMut}, + panic::Location, }; use disqualified::ShortName; +use thiserror::Error; use super::Populated; use variadics_please::{all_tuples, all_tuples_enumerated}; @@ -127,6 +131,29 @@ use variadics_please::{all_tuples, all_tuples_enumerated}; /// This will most commonly occur when working with `SystemParam`s generically, as the requirement /// has not been proven to the compiler. /// +/// ## Custom Validation Messages +/// +/// When using the derive macro, any [`SystemParamValidationError`]s will be propagated from the sub-parameters. +/// If you want to override the error message, add a `#[system_param(validation_message = "New message")]` attribute to the parameter. 
+/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # #[derive(Resource)] +/// # struct SomeResource; +/// # use bevy_ecs::system::SystemParam; +/// # +/// #[derive(SystemParam)] +/// struct MyParam<'w> { +/// #[system_param(validation_message = "Custom Message")] +/// foo: Res<'w, SomeResource>, +/// } +/// +/// let mut world = World::new(); +/// let err = world.run_system_cached(|param: MyParam| {}).unwrap_err(); +/// let expected = "Parameter `MyParam::foo` failed validation: Custom Message"; +/// assert!(err.to_string().ends_with(expected)); +/// ``` +/// /// ## Builders /// /// If you want to use a [`SystemParamBuilder`](crate::system::SystemParamBuilder) with a derived [`SystemParam`] implementation, @@ -233,7 +260,11 @@ pub unsafe trait SystemParam: Sized { fn queue(state: &mut Self::State, system_meta: &SystemMeta, world: DeferredWorld) {} /// Validates that the param can be acquired by the [`get_param`](SystemParam::get_param). - /// Built-in executors use this to prevent systems with invalid params from running. + /// + /// Built-in executors use this to prevent systems with invalid params from running, + /// and any failures here will be bubbled up to the default error handler defined in [`bevy_ecs::error`], + /// with a value of type [`SystemParamValidationError`]. + /// /// For nested [`SystemParam`]s validation will fail if any /// delegated validation fails. /// @@ -253,6 +284,9 @@ pub unsafe trait SystemParam: Sized { /// world mutations inbetween. Otherwise, while it won't lead to any undefined behavior, /// the validity of the param may change. /// + /// [`System::validate_param`](super::system::System::validate_param), + /// calls this method for each supplied system param. + /// /// # Safety /// /// - The passed [`UnsafeWorldCell`] must have read-only access to world data @@ -267,20 +301,18 @@ pub unsafe trait SystemParam: Sized { state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - // By default we allow panics in [`SystemParam::get_param`] and return `true`. - // Preventing panics is an optional feature. - true + ) -> Result<(), SystemParamValidationError> { + Ok(()) } /// Creates a parameter to be passed into a [`SystemParamFunction`](super::SystemParamFunction). /// /// # Safety /// - /// - The passed [`UnsafeWorldCell`] must have access to any world data - /// registered in [`init_state`](SystemParam::init_state). + /// - The passed [`UnsafeWorldCell`] must have access to any world data registered + /// in [`init_state`](SystemParam::init_state). /// - `world` must be the same [`World`] that was used to initialize [`state`](SystemParam::init_state). - /// - all `world`'s archetypes have been processed by [`new_archetype`](SystemParam::new_archetype). + /// - All `world`'s archetypes have been processed by [`new_archetype`](SystemParam::new_archetype). unsafe fn get_param<'world, 'state>( state: &'state mut Self::State, system_meta: &SystemMeta, @@ -334,6 +366,7 @@ unsafe impl SystemParam for Qu // SAFETY: We have registered all of the query's world accesses, // so the caller ensures that `world` has permission to access any // world data that the query needs. + // The caller ensures the world matches the one used in init_state. 
unsafe { state.query_unchecked_manual_with_ticks(world, system_meta.last_run, change_tick) } } } @@ -402,13 +435,13 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam fo world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - state.validate_world(world.id()); // SAFETY: State ensures that the components it accesses are not accessible somewhere elsewhere. + // The caller ensures the world matches the one used in init_state. let query = unsafe { state.query_unchecked_manual_with_ticks(world, system_meta.last_run, change_tick) }; let single = query - .get_single_inner() + .single_inner() .expect("The query was expected to contain exactly one matching entity."); Single { item: single, @@ -421,10 +454,10 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam fo state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state.validate_world(world.id()); + ) -> Result<(), SystemParamValidationError> { // SAFETY: State ensures that the components it accesses are not mutably accessible elsewhere // and the query is read only. + // The caller ensures the world matches the one used in init_state. let query = unsafe { state.query_unchecked_manual_with_ticks( world, @@ -432,11 +465,15 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam fo world.change_tick(), ) }; - let is_valid = query.get_single_inner().is_ok(); - if !is_valid { - system_meta.try_warn_param::(); + match query.single_inner() { + Ok(_) => Ok(()), + Err(QuerySingleError::NoEntities(_)) => Err( + SystemParamValidationError::skipped::("No matching entities"), + ), + Err(QuerySingleError::MultipleEntities(_)) => Err( + SystemParamValidationError::skipped::("Multiple matching entities"), + ), } - is_valid } } @@ -470,10 +507,11 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam ) -> Self::Item<'w, 's> { state.validate_world(world.id()); // SAFETY: State ensures that the components it accesses are not accessible elsewhere. + // The caller ensures the world matches the one used in init_state. let query = unsafe { state.query_unchecked_manual_with_ticks(world, system_meta.last_run, change_tick) }; - match query.get_single_inner() { + match query.single_inner() { Ok(single) => Some(Single { item: single, _filter: PhantomData, @@ -488,10 +526,10 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state.validate_world(world.id()); + ) -> Result<(), SystemParamValidationError> { // SAFETY: State ensures that the components it accesses are not mutably accessible elsewhere // and the query is read only. + // The caller ensures the world matches the one used in init_state. 
let query = unsafe { state.query_unchecked_manual_with_ticks( world, @@ -499,12 +537,12 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam world.change_tick(), ) }; - let result = query.get_single_inner(); - let is_valid = !matches!(result, Err(QuerySingleError::MultipleEntities(_))); - if !is_valid { - system_meta.try_warn_param::(); + match query.single_inner() { + Ok(_) | Err(QuerySingleError::NoEntities(_)) => Ok(()), + Err(QuerySingleError::MultipleEntities(_)) => Err( + SystemParamValidationError::skipped::("Multiple matching entities"), + ), } - is_valid } } @@ -558,13 +596,23 @@ unsafe impl SystemParam state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state.validate_world(world.id()); + ) -> Result<(), SystemParamValidationError> { // SAFETY: // - We have read-only access to the components accessed by query. - // - The world has been validated. - !unsafe { - state.is_empty_unsafe_world_cell(world, system_meta.last_run, world.change_tick()) + // - The caller ensures the world matches the one used in init_state. + let query = unsafe { + state.query_unchecked_manual_with_ticks( + world, + system_meta.last_run, + world.change_tick(), + ) + }; + if query.is_empty() { + Err(SystemParamValidationError::skipped::( + "No matching entities", + )) + } else { + Ok(()) } } } @@ -680,7 +728,7 @@ unsafe impl<'w, 's, D: ReadOnlyQueryData + 'static, F: QueryFilter + 'static> Re /// // ... /// # let _event = event; /// } -/// set.p1().send(MyEvent::new()); +/// set.p1().write(MyEvent::new()); /// /// let entities = set.p2().entities(); /// // ... @@ -760,7 +808,7 @@ macro_rules! impl_param_set { state: &'s Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell<'w>, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { <($($param,)*) as SystemParam>::validate_param(state, system_meta, world) } @@ -812,7 +860,7 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { type Item<'w, 's> = Res<'w, T>; fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { - let component_id = world.components.register_resource::(); + let component_id = world.components_registrator().register_resource::(); let archetype_component_id = world.initialize_resource_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -836,18 +884,21 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { #[inline] unsafe fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. 
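A sketch (with a placeholder `Config` resource) of how the `Res` change above surfaces to users: a missing resource is now an `invalid` validation error, which the default error handler turns into a panic, while `Option<Res<T>>` remains the graceful opt-out.

```rust
use bevy_ecs::prelude::*;

#[derive(Resource)]
struct Config {
    value: u32,
}

// If `Config` is missing, validation fails with an `invalid` error and the
// default error handler panics with:
// "Parameter `Res<Config>` failed validation: Resource does not exist"
fn strict(config: Res<Config>) {
    let _ = config.value;
}

// Wrapping the parameter in `Option` keeps the system valid either way.
fn lenient(config: Option<Res<Config>>) {
    if let Some(config) = config {
        let _ = config.value;
    }
}

fn main() {
    let mut world = World::new();
    world.insert_resource(Config { value: 1 });

    let mut schedule = Schedule::default();
    schedule.add_systems((strict, lenient));
    schedule.run(&mut world);
}
```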
- let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Resource does not exist", + )) } - is_valid } #[inline] @@ -857,7 +908,7 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks, _caller) = + let (ptr, ticks, caller) = world .get_resource_with_ticks(component_id) .unwrap_or_else(|| { @@ -875,8 +926,7 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_location")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } } } @@ -902,7 +952,7 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { ) -> Self::Item<'w, 's> { world .get_resource_with_ticks(component_id) - .map(|(ptr, ticks, _caller)| Res { + .map(|(ptr, ticks, caller)| Res { value: ptr.deref(), ticks: Ticks { added: ticks.added.deref(), @@ -910,8 +960,7 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_location")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), }) } } @@ -923,7 +972,7 @@ unsafe impl<'a, T: Resource> SystemParam for ResMut<'a, T> { type Item<'w, 's> = ResMut<'w, T>; fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { - let component_id = world.components.register_resource::(); + let component_id = world.components_registrator().register_resource::(); let archetype_component_id = world.initialize_resource_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -950,18 +999,21 @@ unsafe impl<'a, T: Resource> SystemParam for ResMut<'a, T> { #[inline] unsafe fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. - let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Resource does not exist", + )) } - is_valid } #[inline] @@ -988,7 +1040,6 @@ unsafe impl<'a, T: Resource> SystemParam for ResMut<'a, T> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_location")] changed_by: value.changed_by, } } @@ -1020,7 +1071,6 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_location")] changed_by: value.changed_by, }) } @@ -1419,6 +1469,33 @@ unsafe impl SystemParam for Deferred<'_, T> { } } +/// A dummy type that is [`!Send`](Send), to force systems to run on the main thread. +pub struct NonSendMarker; + +// SAFETY: No world access. 
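An illustrative use of the `NonSendMarker` parameter introduced above; the `bevy_ecs::system::NonSendMarker` import path is an assumption based on the module this file lives in.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::system::NonSendMarker;

// `NonSendMarker` registers no world access, but marks the system as
// non-send, so the executor will only run it on the main thread.
fn main_thread_only(_marker: NonSendMarker) {
    // e.g. call into a main-thread-only OS API here
}

fn main() {
    let mut world = World::new();
    let mut schedule = Schedule::default();
    schedule.add_systems(main_thread_only);
    schedule.run(&mut world);
}
```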
+unsafe impl SystemParam for NonSendMarker { + type State = (); + type Item<'w, 's> = Self; + + #[inline] + fn init_state(_world: &mut World, system_meta: &mut SystemMeta) -> Self::State { + system_meta.set_non_send(); + } + + #[inline] + unsafe fn get_param<'world, 'state>( + _state: &'state mut Self::State, + _system_meta: &SystemMeta, + _world: UnsafeWorldCell<'world>, + _change_tick: Tick, + ) -> Self::Item<'world, 'state> { + Self + } +} + +// SAFETY: Does not read any world state +unsafe impl ReadOnlySystemParam for NonSendMarker {} + /// Shared borrow of a non-[`Send`] resource. /// /// Only `Send` resources may be accessed with the [`Res`] [`SystemParam`]. In case that the @@ -1435,8 +1512,7 @@ pub struct NonSend<'w, T: 'static> { ticks: ComponentTicks, last_run: Tick, this_run: Tick, - #[cfg(feature = "track_location")] - changed_by: &'static Location<'static>, + changed_by: MaybeLocation<&'w &'static Location<'static>>, } // SAFETY: Only reads a single World non-send resource @@ -1463,9 +1539,8 @@ impl<'w, T: 'static> NonSend<'w, T> { } /// The location that last caused this to change. - #[cfg(feature = "track_location")] - pub fn changed_by(&self) -> &'static Location<'static> { - self.changed_by + pub fn changed_by(&self) -> MaybeLocation { + self.changed_by.copied() } } @@ -1486,8 +1561,7 @@ impl<'a, T> From> for NonSend<'a, T> { }, this_run: nsm.ticks.this_run, last_run: nsm.ticks.last_run, - #[cfg(feature = "track_location")] - changed_by: nsm.changed_by, + changed_by: nsm.changed_by.map(|changed_by| &*changed_by), } } } @@ -1501,7 +1575,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { system_meta.set_non_send(); - let component_id = world.components.register_non_send::(); + let component_id = world.components_registrator().register_non_send::(); let archetype_component_id = world.initialize_non_send_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -1525,18 +1599,21 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { #[inline] unsafe fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. 
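For comparison with the `Res` example earlier, a sketch of the `NonSend<T>` parameter documented above, using a placeholder `Rc`-holding type that is deliberately `!Send`:

```rust
use bevy_ecs::prelude::*;
use std::rc::Rc;

// `Rc` is `!Send`, so this type cannot be a regular `Resource`.
struct MainThreadData(Rc<u32>);

fn read_it(data: NonSend<MainThreadData>) {
    let _value = *data.0;
}

fn main() {
    let mut world = World::new();
    world.insert_non_send_resource(MainThreadData(Rc::new(7)));

    let mut schedule = Schedule::default();
    schedule.add_systems(read_it);
    schedule.run(&mut world);
}
```

If the non-send resource is missing, validation now fails with "Non-send resource does not exist", mirroring the `Res` case.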
- let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .non_send_resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Non-send resource does not exist", + )) } - is_valid } #[inline] @@ -1546,7 +1623,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks, _caller) = + let (ptr, ticks, caller) = world .get_non_send_with_ticks(component_id) .unwrap_or_else(|| { @@ -1562,8 +1639,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { ticks: ticks.read(), last_run: system_meta.last_run, this_run: change_tick, - #[cfg(feature = "track_location")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } } } @@ -1589,13 +1665,12 @@ unsafe impl SystemParam for Option> { ) -> Self::Item<'w, 's> { world .get_non_send_with_ticks(component_id) - .map(|(ptr, ticks, _caller)| NonSend { + .map(|(ptr, ticks, caller)| NonSend { value: ptr.deref(), ticks: ticks.read(), last_run: system_meta.last_run, this_run: change_tick, - #[cfg(feature = "track_location")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), }) } } @@ -1609,7 +1684,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { system_meta.set_non_send(); - let component_id = world.components.register_non_send::(); + let component_id = world.components_registrator().register_non_send::(); let archetype_component_id = world.initialize_non_send_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -1636,18 +1711,21 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { #[inline] unsafe fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. 
- let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .non_send_resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Non-send resource does not exist", + )) } - is_valid } #[inline] @@ -1657,7 +1735,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks, _caller) = + let (ptr, ticks, caller) = world .get_non_send_with_ticks(component_id) .unwrap_or_else(|| { @@ -1670,8 +1748,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { NonSendMut { value: ptr.assert_unique().deref_mut(), ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), - #[cfg(feature = "track_location")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), } } } @@ -1694,11 +1771,10 @@ unsafe impl<'a, T: 'static> SystemParam for Option> { ) -> Self::Item<'w, 's> { world .get_non_send_with_ticks(component_id) - .map(|(ptr, ticks, _caller)| NonSendMut { + .map(|(ptr, ticks, caller)| NonSendMut { value: ptr.assert_unique().deref_mut(), ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), - #[cfg(feature = "track_location")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), }) } } @@ -1857,10 +1933,11 @@ unsafe impl SystemParam for Vec { state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state - .iter() - .all(|state| T::validate_param(state, system_meta, world)) + ) -> Result<(), SystemParamValidationError> { + for state in state { + T::validate_param(state, system_meta, world)?; + } + Ok(()) } #[inline] @@ -2005,7 +2082,7 @@ macro_rules! impl_system_param_tuple { reason = "Zero-length tuples won't use some of the parameters." )] $(#[$meta])* - // SAFETY: implementors of each `SystemParam` in the tuple have validated their impls + // SAFETY: implementers of each `SystemParam` in the tuple have validated their impls unsafe impl<$($param: SystemParam),*> SystemParam for ($($param,)*) { type State = ($($param::State,)*); type Item<'w, 's> = ($($param::Item::<'w, 's>,)*); @@ -2044,9 +2121,12 @@ macro_rules! impl_system_param_tuple { state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { let ($($param,)*) = state; - $($param::validate_param($param, system_meta, world)&&)* true + $( + $param::validate_param($param, system_meta, world)?; + )* + Ok(()) } #[inline] @@ -2213,7 +2293,7 @@ unsafe impl SystemParam for StaticSystemParam<'_, '_, state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { P::validate_param(state, system_meta, world) } @@ -2459,7 +2539,11 @@ trait DynParamState: Sync + Send { /// /// # Safety /// Refer to [`SystemParam::validate_param`]. - unsafe fn validate_param(&self, system_meta: &SystemMeta, world: UnsafeWorldCell) -> bool; + unsafe fn validate_param( + &self, + system_meta: &SystemMeta, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError>; } /// A wrapper around a [`SystemParam::State`] that can be used as a trait object in a [`DynSystemParam`]. 
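A small sketch of `StaticSystemParam`, whose `validate_param` above simply forwards to the wrapped parameter; `assert_is_system` is used only to show that the generic system instantiates with a concrete parameter type.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::system::{assert_is_system, StaticSystemParam, SystemParam};

// A system generic over any `SystemParam`; state, access, and validation
// are all delegated to `P`.
fn generic_system<P: SystemParam + 'static>(_param: StaticSystemParam<P>) {}

fn main() {
    // Instantiate with a concrete parameter type.
    assert_is_system(generic_system::<Query<'static, 'static, Entity>>);
}
```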
@@ -2483,7 +2567,11 @@ impl DynParamState for ParamState { T::queue(&mut self.0, system_meta, world); } - unsafe fn validate_param(&self, system_meta: &SystemMeta, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param( + &self, + system_meta: &SystemMeta, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { T::validate_param(&self.0, system_meta, world) } } @@ -2503,7 +2591,7 @@ unsafe impl SystemParam for DynSystemParam<'_, '_> { state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { state.0.validate_param(system_meta, world) } @@ -2598,6 +2686,81 @@ unsafe impl SystemParam for FilteredResourcesMut<'_, '_> { } } +/// An error that occurs when a system parameter is not valid, +/// used by system executors to determine what to do with a system. +/// +/// Returned as an error from [`SystemParam::validate_param`], +/// and handled using the unified error handling mechanisms defined in [`bevy_ecs::error`]. +#[derive(Debug, PartialEq, Eq, Clone, Error)] +pub struct SystemParamValidationError { + /// Whether the system should be skipped. + /// + /// If `false`, the error should be handled. + /// By default, this will result in a panic. See [`crate::error`] for more information. + /// + /// This is the default behavior, and is suitable for system params that should *always* be valid, + /// either because sensible fallback behavior exists (like [`Query`] or because + /// failures in validation should be considered a bug in the user's logic that must be immediately addressed (like [`Res`]). + /// + /// If `true`, the system should be skipped. + /// This is suitable for system params that are intended to only operate in certain application states, such as [`Single`]. + pub skipped: bool, + + /// A message describing the validation error. + pub message: Cow<'static, str>, + + /// A string identifying the invalid parameter. + /// This is usually the type name of the parameter. + pub param: Cow<'static, str>, + + /// A string identifying the field within a parameter using `#[derive(SystemParam)]`. + /// This will be an empty string for other parameters. + /// + /// This will be printed after `param` in the `Display` impl, and should include a `::` prefix if non-empty. + pub field: Cow<'static, str>, +} + +impl SystemParamValidationError { + /// Constructs a `SystemParamValidationError` that skips the system. + /// The parameter name is initialized to the type name of `T`, so a `SystemParam` should usually pass `Self`. + pub fn skipped(message: impl Into>) -> Self { + Self::new::(true, message, Cow::Borrowed("")) + } + + /// Constructs a `SystemParamValidationError` for an invalid parameter that should be treated as an error. + /// The parameter name is initialized to the type name of `T`, so a `SystemParam` should usually pass `Self`. + pub fn invalid(message: impl Into>) -> Self { + Self::new::(false, message, Cow::Borrowed("")) + } + + /// Constructs a `SystemParamValidationError` for an invalid parameter. + /// The parameter name is initialized to the type name of `T`, so a `SystemParam` should usually pass `Self`. 
+ pub fn new( + skipped: bool, + message: impl Into>, + field: impl Into>, + ) -> Self { + Self { + skipped, + message: message.into(), + param: Cow::Borrowed(core::any::type_name::()), + field: field.into(), + } + } +} + +impl Display for SystemParamValidationError { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!( + fmt, + "Parameter `{}{}` failed validation: {}", + ShortName(&self.param), + self.field, + self.message + ) + } +} + #[cfg(test)] mod tests { use super::*; @@ -2831,4 +2994,34 @@ mod tests { let _query: Query<()> = p.downcast_mut_inner().unwrap(); let _query: Query<()> = p.downcast().unwrap(); } + + #[test] + #[should_panic = "Encountered an error in system `bevy_ecs::system::system_param::tests::missing_resource_error::res_system`: Parameter `Res` failed validation: Resource does not exist"] + fn missing_resource_error() { + #[derive(Resource)] + pub struct MissingResource; + + let mut schedule = crate::schedule::Schedule::default(); + schedule.add_systems(res_system); + let mut world = World::new(); + schedule.run(&mut world); + + fn res_system(_: Res) {} + } + + #[test] + #[should_panic = "Encountered an error in system `bevy_ecs::system::system_param::tests::missing_event_error::event_system`: Parameter `EventReader::events` failed validation: Event not initialized"] + fn missing_event_error() { + use crate::prelude::{Event, EventReader}; + + #[derive(Event)] + pub struct MissingEvent; + + let mut schedule = crate::schedule::Schedule::default(); + schedule.add_systems(event_system); + let mut world = World::new(); + schedule.run(&mut world); + + fn event_system(_: EventReader) {} + } } diff --git a/crates/bevy_ecs/src/system/system_registry.rs b/crates/bevy_ecs/src/system/system_registry.rs index 2d849dda95..cf53b35be5 100644 --- a/crates/bevy_ecs/src/system/system_registry.rs +++ b/crates/bevy_ecs/src/system/system_registry.rs @@ -3,13 +3,13 @@ use crate::reflect::ReflectComponent; use crate::{ change_detection::Mut, entity::Entity, - system::{input::SystemInput, BoxedSystem, IntoSystem, System}, + system::{input::SystemInput, BoxedSystem, IntoSystem, SystemParamValidationError}, world::World, }; use alloc::boxed::Box; -use bevy_ecs_macros::{require, Component, Resource}; +use bevy_ecs_macros::{Component, Resource}; #[cfg(feature = "bevy_reflect")] -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use core::marker::PhantomData; use thiserror::Error; @@ -33,7 +33,7 @@ impl RegisteredSystem { /// Marker [`Component`](bevy_ecs::component::Component) for identifying [`SystemId`] [`Entity`]s. #[derive(Component, Default)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] -#[cfg_attr(feature = "bevy_reflect", reflect(Component))] +#[cfg_attr(feature = "bevy_reflect", reflect(Component, Default))] pub struct SystemIdMarker; /// A system that has been removed from the registry. @@ -126,7 +126,21 @@ impl core::fmt::Debug for SystemId { /// /// This resource is inserted by [`World::register_system_cached`]. #[derive(Resource)] -pub struct CachedSystemId(pub SystemId); +pub struct CachedSystemId { + /// The cached `SystemId` as an `Entity`. + pub entity: Entity, + _marker: PhantomData S>, +} + +impl CachedSystemId { + /// Creates a new `CachedSystemId` struct given a `SystemId`. + pub fn new(id: SystemId) -> Self { + Self { + entity: id.entity(), + _marker: PhantomData, + } + } +} impl World { /// Registers a system and returns a [`SystemId`] so it can later be called by [`World::run_system`]. 
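A sketch of handling the richer error from `World::run_system`, using the `InvalidParams { system, err }` shape this diff introduces further below; `Missing` is a placeholder resource.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::system::RegisteredSystemError;

#[derive(Resource)]
struct Missing;

fn needs_resource(_: Res<Missing>) {}

fn main() {
    let mut world = World::new();
    let id = world.register_system(needs_resource);

    // `Missing` was never inserted, so parameter validation fails and the
    // returned error carries the underlying `SystemParamValidationError`.
    match world.run_system(id) {
        Err(RegisteredSystemError::InvalidParams { err, .. }) => {
            assert!(!err.skipped); // `Res` reports `invalid`, not `skipped`
            assert!(err.to_string().contains("Resource does not exist"));
        }
        other => panic!("unexpected result: {other:?}"),
    }
}
```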
@@ -154,7 +168,7 @@ impl World { /// Similar to [`Self::register_system`], but allows passing in a [`BoxedSystem`]. /// /// This is useful if the [`IntoSystem`] implementor has already been turned into a - /// [`System`] trait object and put in a [`Box`]. + /// [`System`](crate::system::System) trait object and put in a [`Box`]. pub fn register_boxed_system(&mut self, system: BoxedSystem) -> SystemId where I: SystemInput + 'static, @@ -199,12 +213,10 @@ impl World { /// This is different from [`RunSystemOnce::run_system_once`](crate::system::RunSystemOnce::run_system_once), /// because it keeps local state between calls and change detection works correctly. /// + /// Also runs any queued-up commands. + /// /// In order to run a chained system with an input, use [`World::run_system_with`] instead. /// - /// # Limitations - /// - /// - Stored systems cannot be recursive, they cannot call themselves through [`Commands::run_system`](crate::system::Commands). - /// /// # Examples /// /// ## Running a system @@ -291,9 +303,7 @@ impl World { /// Before running a system, it must first be registered. /// The method [`World::register_system`] stores a given system and returns a [`SystemId`]. /// - /// # Limitations - /// - /// - Stored systems cannot be recursive, they cannot call themselves through [`Commands::run_system`](crate::system::Commands). + /// Also runs any queued-up commands. /// /// # Examples /// @@ -322,12 +332,12 @@ impl World { I: SystemInput + 'static, O: 'static, { - // lookup + // Lookup let mut entity = self .get_entity_mut(id.entity) .map_err(|_| RegisteredSystemError::SystemIdNotRegistered(id))?; - // take ownership of system trait object + // Take ownership of system trait object let RegisteredSystem { mut initialized, mut system, @@ -335,25 +345,33 @@ impl World { .take::>() .ok_or(RegisteredSystemError::Recursive(id))?; - // run the system + // Run the system if !initialized { system.initialize(self); initialized = true; } - let result = if system.validate_param(self) { - Ok(system.run(input, self)) - } else { - Err(RegisteredSystemError::InvalidParams(id)) - }; + let result = system + .validate_param(self) + .map_err(|err| RegisteredSystemError::InvalidParams { system: id, err }) + .map(|()| { + // Wait to run the commands until the system is available again. + // This is needed so the systems can recursively run themselves. 
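A sketch of the recursion that the reordering above enables: a registered system can re-enqueue itself through `Commands`, because its queued commands are now flushed after the system object is returned to the registry. `Countdown` is a placeholder resource.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::system::SystemId;

#[derive(Resource)]
struct Countdown {
    left: u32,
    me: Option<SystemId>,
}

fn tick(mut countdown: ResMut<Countdown>, mut commands: Commands) {
    if countdown.left > 0 {
        countdown.left -= 1;
        // Re-run this system via a command; it is applied on the flush
        // that follows this invocation.
        commands.run_system(countdown.me.unwrap());
    }
}

fn main() {
    let mut world = World::new();
    world.insert_resource(Countdown { left: 3, me: None });
    let id = world.register_system(tick);
    world.resource_mut::<Countdown>().me = Some(id);

    world.run_system(id).unwrap();
    assert_eq!(world.resource::<Countdown>().left, 0);
}
```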
+ let ret = system.run_without_applying_deferred(input, self); + system.queue_deferred(self.into()); + ret + }); - // return ownership of system trait object (if entity still exists) + // Return ownership of system trait object (if entity still exists) if let Ok(mut entity) = self.get_entity_mut(id.entity) { entity.insert::>(RegisteredSystem { initialized, system, }); } + + // Run any commands enqueued by the system + self.flush(); result } @@ -389,23 +407,23 @@ impl World { ); } - if !self.contains_resource::>() { + if !self.contains_resource::>() { let id = self.register_system(system); - self.insert_resource(CachedSystemId::(id)); + self.insert_resource(CachedSystemId::::new(id)); return id; } - self.resource_scope(|world, mut id: Mut>| { - if let Ok(mut entity) = world.get_entity_mut(id.0.entity()) { + self.resource_scope(|world, mut id: Mut>| { + if let Ok(mut entity) = world.get_entity_mut(id.entity) { if !entity.contains::>() { entity.insert(RegisteredSystem::new(Box::new(IntoSystem::into_system( system, )))); } } else { - id.0 = world.register_system(system); + id.entity = world.register_system(system).entity(); } - id.0 + SystemId::from_entity(id.entity) }) } @@ -422,9 +440,9 @@ impl World { S: IntoSystem + 'static, { let id = self - .remove_resource::>() + .remove_resource::>() .ok_or(RegisteredSystemError::SystemNotCached)?; - self.unregister_system(id.0) + self.unregister_system(SystemId::::from_entity(id.entity)) } /// Runs a cached system, registering it if necessary. @@ -475,10 +493,14 @@ pub enum RegisteredSystemError { #[error("System {0:?} tried to remove itself")] SelfRemove(SystemId), /// System could not be run due to parameters that failed validation. - /// - /// This can occur because the data required by the system was not present in the world. - #[error("The data required by the system {0:?} was not found in the world and the system did not run due to failed parameter validation.")] - InvalidParams(SystemId), + /// This should not be considered an error if [`field@SystemParamValidationError::skipped`] is `true`. + #[error("System {system:?} did not run due to failed parameter validation: {err}")] + InvalidParams { + /// The identifier of the system that was run. + system: SystemId, + /// The returned parameter validation error. 
+ err: SystemParamValidationError, + }, } impl core::fmt::Debug for RegisteredSystemError { @@ -490,13 +512,22 @@ impl core::fmt::Debug for RegisteredSystemError { Self::SystemNotCached => write!(f, "SystemNotCached"), Self::Recursive(arg0) => f.debug_tuple("Recursive").field(arg0).finish(), Self::SelfRemove(arg0) => f.debug_tuple("SelfRemove").field(arg0).finish(), - Self::InvalidParams(arg0) => f.debug_tuple("InvalidParams").field(arg0).finish(), + Self::InvalidParams { system, err } => f + .debug_struct("InvalidParams") + .field("system", system) + .field("err", err) + .finish(), } } } +#[cfg(test)] mod tests { - use crate::prelude::*; + use core::cell::Cell; + + use bevy_utils::default; + + use crate::{prelude::*, system::SystemId}; #[derive(Resource, Default, PartialEq, Debug)] struct Counter(u8); @@ -754,6 +785,43 @@ mod tests { assert!(matches!(output, Ok(8))); } + #[test] + fn cached_system_into_same_system_type() { + use crate::error::Result; + + struct Foo; + impl IntoSystem<(), Result<()>, ()> for Foo { + type System = ApplyDeferred; + fn into_system(_: Self) -> Self::System { + ApplyDeferred + } + } + + struct Bar; + impl IntoSystem<(), Result<()>, ()> for Bar { + type System = ApplyDeferred; + fn into_system(_: Self) -> Self::System { + ApplyDeferred + } + } + + let mut world = World::new(); + let foo1 = world.register_system_cached(Foo); + let foo2 = world.register_system_cached(Foo); + let bar1 = world.register_system_cached(Bar); + let bar2 = world.register_system_cached(Bar); + + // The `S: IntoSystem` types are different, so they should be cached + // as separate systems, even though the `::System` + // types / values are the same (`ApplyDeferred`). + assert_ne!(foo1, bar1); + + // But if the `S: IntoSystem` types are the same, they'll be cached + // as the same system. + assert_eq!(foo1, foo2); + assert_eq!(bar1, bar2); + } + #[test] fn system_with_input_ref() { fn with_ref(InRef(input): InRef, mut counter: ResMut) { @@ -797,19 +865,54 @@ mod tests { #[test] fn run_system_invalid_params() { use crate::system::RegisteredSystemError; + use alloc::{format, string::ToString}; struct T; impl Resource for T {} fn system(_: Res) {} let mut world = World::new(); - let id = world.register_system(system.warn_param_missing()); + let id = world.register_system(system); // This fails because `T` has not been added to the world yet. let result = world.run_system(id); assert!(matches!( result, - Err(RegisteredSystemError::InvalidParams(_)) + Err(RegisteredSystemError::InvalidParams { .. }) )); + let expected = format!("System {id:?} did not run due to failed parameter validation: Parameter `Res` failed validation: Resource does not exist"); + assert_eq!(expected, result.unwrap_err().to_string()); + } + + #[test] + fn run_system_recursive() { + std::thread_local! 
{ + static INVOCATIONS_LEFT: Cell = const { Cell::new(3) }; + static SYSTEM_ID: Cell> = default(); + } + + fn system(mut commands: Commands) { + let count = INVOCATIONS_LEFT.get() - 1; + INVOCATIONS_LEFT.set(count); + if count > 0 { + commands.run_system(SYSTEM_ID.get().unwrap()); + } + } + + let mut world = World::new(); + let id = world.register_system(system); + SYSTEM_ID.set(Some(id)); + world.run_system(id).unwrap(); + + assert_eq!(INVOCATIONS_LEFT.get(), 0); + } + + #[test] + fn run_system_exclusive_adapters() { + let mut world = World::new(); + fn system(_: &mut World) {} + world.run_system_cached(system).unwrap(); + world.run_system_cached(system.pipe(system)).unwrap(); + world.run_system_cached(system.map(|()| {})).unwrap(); } } diff --git a/crates/bevy_ecs/src/world/deferred_world.rs b/crates/bevy_ecs/src/world/deferred_world.rs index 4697dc9d30..02c12fe6a3 100644 --- a/crates/bevy_ecs/src/world/deferred_world.rs +++ b/crates/bevy_ecs/src/world/deferred_world.rs @@ -1,26 +1,27 @@ use core::ops::Deref; -#[cfg(feature = "track_location")] -use core::panic::Location; use crate::{ archetype::Archetype, - change_detection::MutUntyped, + change_detection::{MaybeLocation, MutUntyped}, component::{ComponentId, HookContext, Mutable}, entity::Entity, event::{Event, EventId, Events, SendBatchIds}, observer::{Observers, TriggerTargets}, prelude::{Component, QueryState}, query::{QueryData, QueryFilter}, + relationship::RelationshipHookMode, resource::Resource, system::{Commands, Query}, traversal::Traversal, - world::{error::EntityFetchError, WorldEntityFetch}, + world::{error::EntityMutableFetchError, EntityFetcher, WorldEntityFetch}, }; use super::{unsafe_world_cell::UnsafeWorldCell, Mut, World, ON_INSERT, ON_REPLACE}; /// A [`World`] reference that disallows structural ECS changes. /// This includes initializing resources, registering components or spawning entities. +/// +/// This means that in order to add entities, for example, you will need to use commands instead of the world directly. pub struct DeferredWorld<'w> { // SAFETY: Implementors must not use this reference to make structural changes world: UnsafeWorldCell<'w>, @@ -96,26 +97,44 @@ impl<'w> DeferredWorld<'w> { &mut self, entity: Entity, f: impl FnOnce(&mut T) -> R, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // If the component is not registered, then it doesn't exist on this entity, so no action required. let Some(component_id) = self.component_id::() else { return Ok(None); }; - let entity_cell = match self.get_entity_mut(entity) { - Ok(cell) => cell, - Err(EntityFetchError::AliasedMutability(..)) => { - return Err(EntityFetchError::AliasedMutability(entity)) - } - Err(EntityFetchError::NoSuchEntity(..)) => { - return Err(EntityFetchError::NoSuchEntity( - entity, - self.entities().entity_does_not_exist_error_details(entity), - )) - } - }; + self.modify_component_by_id(entity, component_id, move |component| { + // SAFETY: component matches the component_id collected in the above line + let mut component = unsafe { component.with_type::() }; - if !entity_cell.contains::() { + f(&mut component) + }) + } + + /// Temporarily removes a [`Component`] identified by the provided + /// [`ComponentId`] from the provided [`Entity`] and runs the provided + /// closure on it, returning the result if the component was available. + /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. 
+ /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut_by_id`](DeferredWorld::get_mut_by_id). + /// + /// You should prefer the typed [`modify_component`](DeferredWorld::modify_component) + /// whenever possible. + #[inline] + pub(crate) fn modify_component_by_id( + &mut self, + entity: Entity, + component_id: ComponentId, + f: impl for<'a> FnOnce(MutUntyped<'a>) -> R, + ) -> Result, EntityMutableFetchError> { + let entity_cell = self.get_entity_mut(entity)?; + + if !entity_cell.contains_id(component_id) { return Ok(None); } @@ -132,16 +151,15 @@ impl<'w> DeferredWorld<'w> { archetype, entity, [component_id].into_iter(), - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), + RelationshipHookMode::Run, ); if archetype.has_replace_observer() { self.trigger_observers( ON_REPLACE, entity, [component_id].into_iter(), - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), ); } } @@ -153,11 +171,11 @@ impl<'w> DeferredWorld<'w> { // SAFETY: we will run the required hooks to simulate removal/replacement. let mut component = unsafe { entity_cell - .get_mut_assume_mutable::() + .get_mut_assume_mutable_by_id(component_id) .expect("component access confirmed above") }; - let result = f(&mut component); + let result = f(component.reborrow()); // Simulate adding this component by updating the relevant ticks *component.ticks.added = *component.ticks.changed; @@ -173,16 +191,15 @@ impl<'w> DeferredWorld<'w> { archetype, entity, [component_id].into_iter(), - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), + RelationshipHookMode::Run, ); if archetype.has_insert_observer() { self.trigger_observers( ON_INSERT, entity, [component_id].into_iter(), - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), ); } } @@ -207,23 +224,23 @@ impl<'w> DeferredWorld<'w> { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if any of the given `entities` do not exist in the world. + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if any of the given `entities` do not exist in the world. /// - Only the first entity found to be missing will be returned. - /// - Returns [`EntityFetchError::AliasedMutability`] if the same entity is requested multiple times. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the same entity is requested multiple times. /// /// # Examples /// /// For examples, see [`DeferredWorld::entity_mut`]. /// /// [`EntityMut`]: crate::world::EntityMut - /// [`&EntityHashSet`]: crate::entity::hash_set::EntityHashSet - /// [`EntityHashMap`]: crate::entity::hash_map::EntityHashMap + /// [`&EntityHashSet`]: crate::entity::EntityHashSet + /// [`EntityHashMap`]: crate::entity::EntityHashMap /// [`Vec`]: alloc::vec::Vec #[inline] pub fn get_entity_mut( &mut self, entities: F, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let cell = self.as_unsafe_world_cell(); // SAFETY: `&mut self` gives mutable access to the entire world, // and prevents any other access to the world. 
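A sketch of the `modify_component` flow documented above, driven from a `DeferredWorld` constructed directly from a `World` (as in the doc example further below); `Counter` is a placeholder immutable component.

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::world::DeferredWorld;

// An immutable component: it cannot be changed through a normal `Mut<Counter>`.
#[derive(Component)]
#[component(immutable)]
struct Counter(u32);

fn main() {
    let mut world = World::new();
    let entity = world.spawn(Counter(0)).id();

    let mut deferred = DeferredWorld::from(&mut world);
    // Runs the closure as a simulated remove + reinsert, so the replace and
    // insert hooks/observers fire, without an archetype move.
    let previous = deferred
        .modify_component(entity, |counter: &mut Counter| {
            let old = counter.0;
            counter.0 += 1;
            old
        })
        .unwrap();

    assert_eq!(previous, Some(0));
    assert_eq!(world.get::<Counter>(entity).unwrap().0, 1);
}
```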
@@ -325,7 +342,7 @@ impl<'w> DeferredWorld<'w> { /// ## [`&EntityHashSet`] /// /// ``` - /// # use bevy_ecs::{prelude::*, entity::hash_set::EntityHashSet, world::DeferredWorld}; + /// # use bevy_ecs::{prelude::*, entity::EntityHashSet, world::DeferredWorld}; /// #[derive(Component)] /// struct Position { /// x: f32, @@ -348,14 +365,61 @@ impl<'w> DeferredWorld<'w> { /// ``` /// /// [`EntityMut`]: crate::world::EntityMut - /// [`&EntityHashSet`]: crate::entity::hash_set::EntityHashSet - /// [`EntityHashMap`]: crate::entity::hash_map::EntityHashMap + /// [`&EntityHashSet`]: crate::entity::EntityHashSet + /// [`EntityHashMap`]: crate::entity::EntityHashMap /// [`Vec`]: alloc::vec::Vec #[inline] pub fn entity_mut(&mut self, entities: F) -> F::DeferredMut<'_> { self.get_entity_mut(entities).unwrap() } + /// Simultaneously provides access to entity data and a command queue, which + /// will be applied when the [`World`] is next flushed. + /// + /// This allows using borrowed entity data to construct commands where the + /// borrow checker would otherwise prevent it. + /// + /// See [`World::entities_and_commands`] for the non-deferred version. + /// + /// # Example + /// + /// ```rust + /// # use bevy_ecs::{prelude::*, world::DeferredWorld}; + /// #[derive(Component)] + /// struct Targets(Vec); + /// #[derive(Component)] + /// struct TargetedBy(Entity); + /// + /// # let mut _world = World::new(); + /// # let e1 = _world.spawn_empty().id(); + /// # let e2 = _world.spawn_empty().id(); + /// # let eid = _world.spawn(Targets(vec![e1, e2])).id(); + /// let mut world: DeferredWorld = // ... + /// # DeferredWorld::from(&mut _world); + /// let (entities, mut commands) = world.entities_and_commands(); + /// + /// let entity = entities.get(eid).unwrap(); + /// for &target in entity.get::().unwrap().0.iter() { + /// commands.entity(target).insert(TargetedBy(eid)); + /// } + /// # _world.flush(); + /// # assert_eq!(_world.get::(e1).unwrap().0, eid); + /// # assert_eq!(_world.get::(e2).unwrap().0, eid); + /// ``` + pub fn entities_and_commands(&mut self) -> (EntityFetcher, Commands) { + let cell = self.as_unsafe_world_cell(); + // SAFETY: `&mut self` gives mutable access to the entire world, and prevents simultaneous access. + let fetcher = unsafe { EntityFetcher::new(cell) }; + // SAFETY: + // - `&mut self` gives mutable access to the entire world, and prevents simultaneous access. + // - Command queue access does not conflict with entity access. + let raw_queue = unsafe { cell.get_raw_command_queue() }; + // SAFETY: `&mut self` ensures the commands does not outlive the world. + let commands = unsafe { Commands::new_raw_from_entities(raw_queue, cell.entities()) }; + + (fetcher, commands) + } + /// Returns [`Query`] for the given [`QueryState`], which is used to efficiently /// run queries on the [`World`] by storing and reusing the [`QueryState`]. 
/// @@ -519,7 +583,7 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { if archetype.has_add_hook() { for component_id in targets { @@ -531,10 +595,8 @@ impl<'w> DeferredWorld<'w> { HookContext { entity, component_id, - #[cfg(feature = "track_location")] - caller: Some(caller), - #[cfg(not(feature = "track_location"))] - caller: None, + caller, + relationship_hook_mode: RelationshipHookMode::Run, }, ); } @@ -552,7 +614,8 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, + relationship_hook_mode: RelationshipHookMode, ) { if archetype.has_insert_hook() { for component_id in targets { @@ -564,10 +627,8 @@ impl<'w> DeferredWorld<'w> { HookContext { entity, component_id, - #[cfg(feature = "track_location")] - caller: Some(caller), - #[cfg(not(feature = "track_location"))] - caller: None, + caller, + relationship_hook_mode, }, ); } @@ -585,7 +646,8 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, + relationship_hook_mode: RelationshipHookMode, ) { if archetype.has_replace_hook() { for component_id in targets { @@ -597,10 +659,8 @@ impl<'w> DeferredWorld<'w> { HookContext { entity, component_id, - #[cfg(feature = "track_location")] - caller: Some(caller), - #[cfg(not(feature = "track_location"))] - caller: None, + caller, + relationship_hook_mode, }, ); } @@ -618,7 +678,7 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { if archetype.has_remove_hook() { for component_id in targets { @@ -630,10 +690,8 @@ impl<'w> DeferredWorld<'w> { HookContext { entity, component_id, - #[cfg(feature = "track_location")] - caller: Some(caller), - #[cfg(not(feature = "track_location"))] - caller: None, + caller, + relationship_hook_mode: RelationshipHookMode::Run, }, ); } @@ -651,7 +709,7 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { if archetype.has_despawn_hook() { for component_id in targets { @@ -663,10 +721,8 @@ impl<'w> DeferredWorld<'w> { HookContext { entity, component_id, - #[cfg(feature = "track_location")] - caller: Some(caller), - #[cfg(not(feature = "track_location"))] - caller: None, + caller, + relationship_hook_mode: RelationshipHookMode::Run, }, ); } @@ -684,7 +740,7 @@ impl<'w> DeferredWorld<'w> { event: ComponentId, target: Entity, components: impl Iterator + Clone, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { Observers::invoke::<_>( self.reborrow(), @@ -693,7 +749,6 @@ impl<'w> DeferredWorld<'w> { components, &mut (), &mut false, - #[cfg(feature = "track_location")] caller, ); } @@ -707,10 +762,10 @@ impl<'w> DeferredWorld<'w> { &mut self, event: ComponentId, mut target: Entity, - components: &[ComponentId], + components: impl Iterator + Clone, data: &mut E, mut propagate: bool, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) where T: Traversal, { @@ -719,10 +774,9 @@ 
impl<'w> DeferredWorld<'w> { self.reborrow(), event, target, - components.iter().copied(), + components.clone(), data, &mut propagate, - #[cfg(feature = "track_location")] caller, ); if !propagate { diff --git a/crates/bevy_ecs/src/world/entity_fetch.rs b/crates/bevy_ecs/src/world/entity_fetch.rs index 466dcc31d0..8588131563 100644 --- a/crates/bevy_ecs/src/world/entity_fetch.rs +++ b/crates/bevy_ecs/src/world/entity_fetch.rs @@ -2,13 +2,99 @@ use alloc::vec::Vec; use core::mem::MaybeUninit; use crate::{ - entity::{hash_map::EntityHashMap, hash_set::EntityHashSet, Entity}, + entity::{Entity, EntityDoesNotExistError, EntityHashMap, EntityHashSet}, + error::Result, world::{ - error::EntityFetchError, unsafe_world_cell::UnsafeWorldCell, EntityMut, EntityRef, + error::EntityMutableFetchError, unsafe_world_cell::UnsafeWorldCell, EntityMut, EntityRef, EntityWorldMut, }, }; +/// Provides a safe interface for non-structural access to the entities in a [`World`]. +/// +/// This cannot add or remove components, or spawn or despawn entities, +/// making it relatively safe to access in concert with other ECS data. +/// This type can be constructed via [`World::entities_and_commands`], +/// or [`DeferredWorld::entities_and_commands`]. +/// +/// [`World`]: crate::world::World +/// [`World::entities_and_commands`]: crate::world::World::entities_and_commands +/// [`DeferredWorld::entities_and_commands`]: crate::world::DeferredWorld::entities_and_commands +pub struct EntityFetcher<'w> { + cell: UnsafeWorldCell<'w>, +} + +impl<'w> EntityFetcher<'w> { + // SAFETY: + // - The given `cell` has mutable access to all entities. + // - No other references to entities exist at the same time. + pub(crate) unsafe fn new(cell: UnsafeWorldCell<'w>) -> Self { + Self { cell } + } + + /// Returns [`EntityRef`]s that expose read-only operations for the given + /// `entities`, returning [`Err`] if any of the given entities do not exist. + /// + /// This function supports fetching a single entity or multiple entities: + /// - Pass an [`Entity`] to receive a single [`EntityRef`]. + /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. + /// - Pass an array of [`Entity`]s to receive an equally-sized array of [`EntityRef`]s. + /// - Pass a reference to a [`EntityHashSet`](crate::entity::EntityHashMap) to receive an + /// [`EntityHashMap`](crate::entity::EntityHashMap). + /// + /// # Errors + /// + /// If any of the given `entities` do not exist in the world, the first + /// [`Entity`] found to be missing will return an [`EntityDoesNotExistError`]. + /// + /// # Examples + /// + /// For examples, see [`World::entity`]. + /// + /// [`World::entity`]: crate::world::World::entity + #[inline] + pub fn get( + &self, + entities: F, + ) -> Result, EntityDoesNotExistError> { + // SAFETY: `&self` gives read access to all entities, and prevents mutable access. + unsafe { entities.fetch_ref(self.cell) } + } + + /// Returns [`EntityMut`]s that expose read and write operations for the + /// given `entities`, returning [`Err`] if any of the given entities do not + /// exist. + /// + /// This function supports fetching a single entity or multiple entities: + /// - Pass an [`Entity`] to receive a single [`EntityMut`]. + /// - This reference type allows for structural changes to the entity, + /// such as adding or removing components, or despawning the entity. + /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. + /// - Pass an array of [`Entity`]s to receive an equally-sized array of [`EntityMut`]s. 
+ /// - Pass a reference to a [`EntityHashSet`](crate::entity::EntityHashMap) to receive an + /// [`EntityHashMap`](crate::entity::EntityHashMap). + /// # Errors + /// + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if any of the given `entities` do not exist in the world. + /// - Only the first entity found to be missing will be returned. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the same entity is requested multiple times. + /// + /// # Examples + /// + /// For examples, see [`DeferredWorld::entity_mut`]. + /// + /// [`DeferredWorld::entity_mut`]: crate::world::DeferredWorld::entity_mut + #[inline] + pub fn get_mut( + &mut self, + entities: F, + ) -> Result, EntityMutableFetchError> { + // SAFETY: `&mut self` gives mutable access to all entities, + // and prevents any other access to entities. + unsafe { entities.fetch_deferred_mut(self.cell) } + } +} + /// Types that can be used to fetch [`Entity`] references from a [`World`]. /// /// Provided implementations are: @@ -56,8 +142,11 @@ pub unsafe trait WorldEntityFetch { /// /// # Errors /// - /// - Returns [`Entity`] if the entity does not exist. - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity>; + /// - Returns [`EntityDoesNotExistError`] if the entity does not exist. + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError>; /// Returns mutable reference(s) to the entities with the given [`Entity`] /// IDs, as determined by `self`. @@ -70,11 +159,13 @@ pub unsafe trait WorldEntityFetch { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if the entity does not exist. - /// - Returns [`EntityFetchError::AliasedMutability`] if the entity was + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if the entity does not exist. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the entity was /// requested mutably more than once. - unsafe fn fetch_mut(self, cell: UnsafeWorldCell<'_>) - -> Result, EntityFetchError>; + unsafe fn fetch_mut( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityMutableFetchError>; /// Returns mutable reference(s) to the entities with the given [`Entity`] /// IDs, as determined by `self`, but without structural mutability. @@ -91,13 +182,13 @@ pub unsafe trait WorldEntityFetch { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if the entity does not exist. - /// - Returns [`EntityFetchError::AliasedMutability`] if the entity was + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if the entity does not exist. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the entity was /// requested mutably more than once. unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError>; + ) -> Result, EntityMutableFetchError>; } // SAFETY: @@ -109,8 +200,11 @@ unsafe impl WorldEntityFetch for Entity { type Mut<'w> = EntityWorldMut<'w>; type DeferredMut<'w> = EntityMut<'w>; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { - let ecell = cell.get_entity(self).ok_or(self)?; + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { + let ecell = cell.get_entity(self)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. 
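A sketch of the `World` counterpart referenced above (`World::entities_and_commands`, assumed here to mirror the `DeferredWorld` version shown earlier in this diff): borrow entity data through the `EntityFetcher` while queueing commands. `Health` and `Poisoned` are placeholder components.

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Health(u32);

#[derive(Component)]
struct Poisoned;

fn main() {
    let mut world = World::new();
    let a = world.spawn(Health(10)).id();
    let b = world.spawn(Health(3)).id();

    let (fetcher, mut commands) = world.entities_and_commands();
    // `get` accepts a single `Entity`, arrays, slices, or an `&EntityHashSet`.
    let [ref_a, ref_b] = fetcher.get([a, b]).unwrap();
    for (entity, entity_ref) in [(a, ref_a), (b, ref_b)] {
        if entity_ref.get::<Health>().unwrap().0 < 5 {
            commands.entity(entity).insert(Poisoned);
        }
    }
    world.flush();

    assert!(world.get::<Poisoned>(b).is_some());
    assert!(world.get::<Poisoned>(a).is_none());
}
```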
Ok(unsafe { EntityRef::new(ecell) }) } @@ -118,14 +212,11 @@ unsafe impl WorldEntityFetch for Entity { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let location = cell .entities() .get(self) - .ok_or(EntityFetchError::NoSuchEntity( - self, - cell.entities().entity_does_not_exist_error_details(self), - ))?; + .ok_or(EntityDoesNotExistError::new(self, cell.entities()))?; // SAFETY: caller ensures that the world cell has mutable access to the entity. let world = unsafe { cell.world_mut() }; // SAFETY: location was fetched from the same world's `Entities`. @@ -135,11 +226,8 @@ unsafe impl WorldEntityFetch for Entity { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { - let ecell = cell.get_entity(self).ok_or(EntityFetchError::NoSuchEntity( - self, - cell.entities().entity_does_not_exist_error_details(self), - ))?; + ) -> Result, EntityMutableFetchError> { + let ecell = cell.get_entity(self)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. Ok(unsafe { EntityMut::new(ecell) }) } @@ -154,21 +242,24 @@ unsafe impl WorldEntityFetch for [Entity; N] { type Mut<'w> = [EntityMut<'w>; N]; type DeferredMut<'w> = [EntityMut<'w>; N]; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { <&Self>::fetch_ref(&self, cell) } unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { <&Self>::fetch_mut(&self, cell) } unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { <&Self>::fetch_deferred_mut(&self, cell) } } @@ -182,10 +273,13 @@ unsafe impl WorldEntityFetch for &'_ [Entity; N] { type Mut<'w> = [EntityMut<'w>; N]; type DeferredMut<'w> = [EntityMut<'w>; N]; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { let mut refs = [MaybeUninit::uninit(); N]; for (r, &id) in core::iter::zip(&mut refs, self) { - let ecell = cell.get_entity(id).ok_or(id)?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. *r = MaybeUninit::new(unsafe { EntityRef::new(ecell) }); } @@ -199,22 +293,19 @@ unsafe impl WorldEntityFetch for &'_ [Entity; N] { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // Check for duplicate entities. for i in 0..self.len() { for j in 0..i { if self[i] == self[j] { - return Err(EntityFetchError::AliasedMutability(self[i])); + return Err(EntityMutableFetchError::AliasedMutability(self[i])); } } } let mut refs = [const { MaybeUninit::uninit() }; N]; for (r, &id) in core::iter::zip(&mut refs, self) { - let ecell = cell.get_entity(id).ok_or(EntityFetchError::NoSuchEntity( - id, - cell.entities().entity_does_not_exist_error_details(id), - ))?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. 
*r = MaybeUninit::new(unsafe { EntityMut::new(ecell) }); } @@ -228,7 +319,7 @@ unsafe impl WorldEntityFetch for &'_ [Entity; N] { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // SAFETY: caller ensures that the world cell has mutable access to the entity, // and `fetch_mut` does not return structurally-mutable references. unsafe { self.fetch_mut(cell) } @@ -244,10 +335,13 @@ unsafe impl WorldEntityFetch for &'_ [Entity] { type Mut<'w> = Vec>; type DeferredMut<'w> = Vec>; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { let mut refs = Vec::with_capacity(self.len()); for &id in self { - let ecell = cell.get_entity(id).ok_or(id)?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. refs.push(unsafe { EntityRef::new(ecell) }); } @@ -258,22 +352,19 @@ unsafe impl WorldEntityFetch for &'_ [Entity] { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // Check for duplicate entities. for i in 0..self.len() { for j in 0..i { if self[i] == self[j] { - return Err(EntityFetchError::AliasedMutability(self[i])); + return Err(EntityMutableFetchError::AliasedMutability(self[i])); } } } let mut refs = Vec::with_capacity(self.len()); for &id in self { - let ecell = cell.get_entity(id).ok_or(EntityFetchError::NoSuchEntity( - id, - cell.entities().entity_does_not_exist_error_details(id), - ))?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. refs.push(unsafe { EntityMut::new(ecell) }); } @@ -284,7 +375,7 @@ unsafe impl WorldEntityFetch for &'_ [Entity] { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // SAFETY: caller ensures that the world cell has mutable access to the entity, // and `fetch_mut` does not return structurally-mutable references. unsafe { self.fetch_mut(cell) } @@ -300,10 +391,13 @@ unsafe impl WorldEntityFetch for &'_ EntityHashSet { type Mut<'w> = EntityHashMap>; type DeferredMut<'w> = EntityHashMap>; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { let mut refs = EntityHashMap::with_capacity(self.len()); for &id in self { - let ecell = cell.get_entity(id).ok_or(id)?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. refs.insert(id, unsafe { EntityRef::new(ecell) }); } @@ -313,13 +407,10 @@ unsafe impl WorldEntityFetch for &'_ EntityHashSet { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let mut refs = EntityHashMap::with_capacity(self.len()); for &id in self { - let ecell = cell.get_entity(id).ok_or(EntityFetchError::NoSuchEntity( - id, - cell.entities().entity_does_not_exist_error_details(id), - ))?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. 
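A sketch of the aliased-mutability checks shown above, as seen through `World::get_entity_mut` with the renamed `EntityMutableFetchError`:

```rust
use bevy_ecs::prelude::*;

fn main() {
    let mut world = World::new();
    let a = world.spawn_empty().id();
    let b = world.spawn_empty().id();

    // Distinct entities: disjoint mutable access is allowed.
    let [_a_mut, _b_mut] = world.get_entity_mut([a, b]).unwrap();

    // Requesting the same entity twice is rejected with
    // `EntityMutableFetchError::AliasedMutability` instead of aliasing.
    assert!(world.get_entity_mut([a, a]).is_err());
}
```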
refs.insert(id, unsafe { EntityMut::new(ecell) }); } @@ -329,7 +420,7 @@ unsafe impl WorldEntityFetch for &'_ EntityHashSet { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // SAFETY: caller ensures that the world cell has mutable access to the entity, // and `fetch_mut` does not return structurally-mutable references. unsafe { self.fetch_mut(cell) } diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs index 3a9a49e7fa..a9887c5248 100644 --- a/crates/bevy_ecs/src/world/entity_ref.rs +++ b/crates/bevy_ecs/src/world/entity_ref.rs @@ -4,15 +4,19 @@ use crate::{ Bundle, BundleEffect, BundleFromComponents, BundleId, BundleInfo, BundleInserter, DynamicBundle, InsertMode, }, - change_detection::MutUntyped, - component::{Component, ComponentId, ComponentTicks, Components, Mutable, StorageType}, + change_detection::{MaybeLocation, MutUntyped}, + component::{ + Component, ComponentId, ComponentTicks, Components, ComponentsRegistrator, Mutable, + StorageType, + }, entity::{ - Entities, Entity, EntityBorrow, EntityCloner, EntityClonerBuilder, EntityLocation, - TrustedEntityBorrow, + ContainsEntity, Entities, Entity, EntityCloner, EntityClonerBuilder, EntityEquivalent, + EntityLocation, }, event::Event, observer::Observer, query::{Access, ReadOnlyQueryData}, + relationship::RelationshipHookMode, removal_detection::RemovedComponentEvents, resource::Resource, storage::Storages, @@ -23,10 +27,8 @@ use crate::{ }, }; use alloc::vec::Vec; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_ptr::{OwningPtr, Ptr}; -#[cfg(feature = "track_location")] -use core::panic::Location; use core::{ any::TypeId, cmp::Ordering, @@ -107,7 +109,7 @@ impl<'w> EntityRef<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { self.cell.contains_id(component_id) @@ -248,7 +250,7 @@ impl<'w> EntityRef<'w> { /// ## [`HashSet`] of [`ComponentId`]s /// /// ``` - /// # use bevy_platform_support::collections::HashSet; + /// # use bevy_platform::collections::HashSet; /// # use bevy_ecs::{prelude::*, component::ComponentId}; /// # /// # #[derive(Component, PartialEq, Debug)] @@ -294,8 +296,7 @@ impl<'w> EntityRef<'w> { } /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_location")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.cell.spawned_by() } } @@ -414,14 +415,14 @@ impl Hash for EntityRef<'_> { } } -impl EntityBorrow for EntityRef<'_> { +impl ContainsEntity for EntityRef<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for EntityRef<'_> {} +unsafe impl EntityEquivalent for EntityRef<'_> {} /// Provides mutable access to a single entity and all of its components. /// @@ -513,7 +514,7 @@ impl<'w> EntityMut<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. 
/// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { self.cell.contains_id(component_id) @@ -791,7 +792,7 @@ impl<'w> EntityMut<'w> { /// ## [`HashSet`] of [`ComponentId`]s /// /// ``` - /// # use bevy_platform_support::collections::HashSet; + /// # use bevy_platform::collections::HashSet; /// # use bevy_ecs::{prelude::*, component::ComponentId}; /// # /// # #[derive(Component, PartialEq, Debug)] @@ -823,6 +824,39 @@ impl<'w> EntityMut<'w> { unsafe { component_ids.fetch_mut(self.cell) } } + /// Returns [untyped mutable reference(s)](MutUntyped) to component(s) for + /// the current entity, based on the given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. + /// + /// **You should prefer to use the typed API [`EntityMut::get_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityMut::get_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn get_mut_assume_mutable_by_id( + &mut self, + component_ids: F, + ) -> Result, EntityComponentError> { + // SAFETY: + // - `&mut self` ensures that no references exist to this entity's components. + // - We have exclusive access to all components of this entity. + unsafe { component_ids.fetch_mut_assume_mutable(self.cell) } + } + /// Returns [untyped mutable reference](MutUntyped) to component for /// the current entity, based on the given [`ComponentId`]. /// @@ -851,6 +885,36 @@ impl<'w> EntityMut<'w> { unsafe { component_ids.fetch_mut(self.cell) } } + /// Returns [untyped mutable reference](MutUntyped) to component for + /// the current entity, based on the given [`ComponentId`]. + /// Assumes the given [`ComponentId`]s refer to mutable components. + /// + /// Unlike [`EntityMut::get_mut_assume_mutable_by_id`], this method borrows &self instead of + /// &mut self, allowing the caller to access multiple components simultaneously. + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the [`UnsafeEntityCell`] has permission to access the component mutably + /// - no other references to the component exist at the same time + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn get_mut_assume_mutable_by_id_unchecked( + &self, + component_ids: F, + ) -> Result, EntityComponentError> { + // SAFETY: + // - The caller must ensure simultaneous access is limited + // - to components that are mutually independent. 
+ unsafe { component_ids.fetch_mut_assume_mutable(self.cell) } + } + /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) /// to component(s) with lifetime `'w` for the current entity, based on the /// given [`ComponentId`]s. @@ -884,9 +948,42 @@ impl<'w> EntityMut<'w> { unsafe { component_ids.fetch_mut(self.cell) } } + /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) + /// to component(s) with lifetime `'w` for the current entity, based on the + /// given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. + /// + /// **You should prefer to use the typed API [`EntityMut::into_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityMut::into_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn into_mut_assume_mutable_by_id( + self, + component_ids: F, + ) -> Result, EntityComponentError> { + // SAFETY: + // - consuming `self` ensures that no references exist to this entity's components. + // - We have exclusive access to all components of this entity. + unsafe { component_ids.fetch_mut_assume_mutable(self.cell) } + } + /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_location")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.cell.spawned_by() } } @@ -969,14 +1066,14 @@ impl Hash for EntityMut<'_> { } } -impl EntityBorrow for EntityMut<'_> { +impl ContainsEntity for EntityMut<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for EntityMut<'_> {} +unsafe impl EntityEquivalent for EntityMut<'_> {} /// A mutable reference to a particular [`Entity`], and the entire world. /// @@ -1138,7 +1235,7 @@ impl<'w> EntityWorldMut<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. /// /// # Panics /// @@ -1302,6 +1399,38 @@ impl<'w> EntityWorldMut<'w> { Some(result) } + /// Temporarily removes a [`Component`] `T` from this [`Entity`] and runs the + /// provided closure on it, returning the result if `T` was available. + /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut`](EntityWorldMut::get_mut). + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. 
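The new `*_assume_mutable_by_id` family is aimed at reflection-style code that only has `ComponentId`s but can promise the components are mutable. A rough sketch of that contract (the method comes from the hunk above; using `MutUntyped::with_type` to reinterpret the erased pointer is my assumption about the surrounding `bevy_ecs` API):

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Counter(u32);

fn main() {
    let mut world = World::new();
    let id = world.register_component::<Counter>();
    let mut entity = world.spawn(Counter(0));

    // SAFETY: `id` was registered for `Counter`, an ordinary (mutable)
    // component, so assuming mutability is sound here.
    let erased = unsafe { entity.get_mut_assume_mutable_by_id(id) }
        .expect("the entity was spawned with a Counter");

    // SAFETY: the erased pointer really does point at a `Counter`.
    let mut counter = unsafe { erased.with_type::<Counter>() };
    counter.0 += 1;

    assert_eq!(entity.get::<Counter>().map(|c| c.0), Some(1));
}
```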
+ #[inline] + pub fn modify_component_by_id( + &mut self, + component_id: ComponentId, + f: impl for<'a> FnOnce(MutUntyped<'a>) -> R, + ) -> Option { + self.assert_not_despawned(); + + let result = self + .world + .modify_component_by_id(self.entity, component_id, f) + .expect("entity access must be valid")?; + + self.update_location(); + + Some(result) + } + /// Gets mutable access to the component of type `T` for the current entity. /// Returns `None` if the entity does not have a component of type `T`. /// @@ -1326,6 +1455,23 @@ impl<'w> EntityWorldMut<'w> { unsafe { self.into_unsafe_entity_cell().get_mut() } } + /// Consumes `self` and gets mutable access to the component of type `T` + /// with the world `'w` lifetime for the current entity. + /// Returns `None` if the entity does not have a component of type `T`. + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + /// + /// # Safety + /// + /// - `T` must be a mutable component + #[inline] + pub unsafe fn into_mut_assume_mutable(self) -> Option> { + // SAFETY: consuming `self` implies exclusive access + unsafe { self.into_unsafe_entity_cell().get_mut_assume_mutable() } + } + /// Gets a reference to the resource of the given type /// /// # Panics @@ -1487,6 +1633,41 @@ impl<'w> EntityWorldMut<'w> { self.as_mutable().into_mut_by_id(component_ids) } + /// Returns [untyped mutable reference(s)](MutUntyped) to component(s) for + /// the current entity, based on the given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. + /// + /// **You should prefer to use the typed API [`EntityWorldMut::get_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityWorldMut::get_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn get_mut_assume_mutable_by_id( + &mut self, + component_ids: F, + ) -> Result, EntityComponentError> { + self.as_mutable() + .into_mut_assume_mutable_by_id(component_ids) + } + /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) /// to component(s) with lifetime `'w` for the current entity, based on the /// given [`ComponentId`]s. @@ -1521,6 +1702,42 @@ impl<'w> EntityWorldMut<'w> { self.into_mutable().into_mut_by_id(component_ids) } + /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) + /// to component(s) with lifetime `'w` for the current entity, based on the + /// given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. 
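`modify_component_by_id` is the untyped counterpart of the typed `modify_component` just above it: it temporarily lifts the value out, runs the closure, and reinserts it, firing the replace/remove hooks without an archetype move. A hedged sketch for an immutable component (assumes `#[component(immutable)]` and `MutUntyped::with_type` behave as in current `bevy_ecs`):

```rust
use bevy_ecs::prelude::*;

// Immutable components cannot be borrowed mutably in place, so
// "remove, edit, reinsert" is the only way to change them.
#[derive(Component, Debug, PartialEq)]
#[component(immutable)]
struct Tag(&'static str);

fn main() {
    let mut world = World::new();
    let id = world.register_component::<Tag>();
    let mut entity = world.spawn(Tag("before"));

    let ran = entity.modify_component_by_id(id, |value| {
        // SAFETY: `id` was registered for `Tag` just above.
        let mut tag = unsafe { value.with_type::<Tag>() };
        *tag = Tag("after");
    });

    // `None` would mean the entity did not have the component at all.
    assert!(ran.is_some());
    assert_eq!(entity.get::<Tag>(), Some(&Tag("after")));
}
```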
+ /// + /// **You should prefer to use the typed API [`EntityWorldMut::into_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityWorldMut::into_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn into_mut_assume_mutable_by_id( + self, + component_ids: F, + ) -> Result, EntityComponentError> { + self.into_mutable() + .into_mut_assume_mutable_by_id(component_ids) + } + /// Adds a [`Bundle`] of components to the entity. /// /// This will overwrite any previous value(s) of the same component type. @@ -1533,8 +1750,36 @@ impl<'w> EntityWorldMut<'w> { self.insert_with_caller( bundle, InsertMode::Replace, - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), + RelationshipHookMode::Run, + ) + } + + /// Adds a [`Bundle`] of components to the entity. + /// [`Relationship`](crate::relationship::Relationship) components in the bundle will follow the configuration + /// in `relationship_hook_mode`. + /// + /// This will overwrite any previous value(s) of the same component type. + /// + /// # Warning + /// + /// This can easily break the integrity of relationships. This is intended to be used for cloning and spawning code internals, + /// not most user-facing scenarios. + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. 
+ #[track_caller] + pub fn insert_with_relationship_hook_mode( + &mut self, + bundle: T, + relationship_hook_mode: RelationshipHookMode, + ) -> &mut Self { + self.insert_with_caller( + bundle, + InsertMode::Replace, + MaybeLocation::caller(), + relationship_hook_mode, ) } @@ -1551,8 +1796,8 @@ impl<'w> EntityWorldMut<'w> { self.insert_with_caller( bundle, InsertMode::Keep, - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), + RelationshipHookMode::Run, ) } @@ -1563,7 +1808,8 @@ impl<'w> EntityWorldMut<'w> { &mut self, bundle: T, mode: InsertMode, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, + relationship_hook_mode: RelationshipHookMode, ) -> &mut Self { self.assert_not_despawned(); let change_tick = self.world.change_tick(); @@ -1576,8 +1822,8 @@ impl<'w> EntityWorldMut<'w> { self.location, bundle, mode, - #[cfg(feature = "track_location")] caller, + relationship_hook_mode, ) }; self.location = location; @@ -1610,19 +1856,24 @@ impl<'w> EntityWorldMut<'w> { self.insert_by_id_with_caller( component_id, component, - #[cfg(feature = "track_location")] - Location::caller(), + InsertMode::Replace, + MaybeLocation::caller(), + RelationshipHookMode::Run, ) } /// # Safety - /// See [`EntityWorldMut::insert_by_id`] + /// + /// - [`ComponentId`] must be from the same world as [`EntityWorldMut`] + /// - [`OwningPtr`] must be a valid reference to the type represented by [`ComponentId`] #[inline] pub(crate) unsafe fn insert_by_id_with_caller( &mut self, component_id: ComponentId, component: OwningPtr<'_>, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + mode: InsertMode, + caller: MaybeLocation, + relationship_hook_insert_mode: RelationshipHookMode, ) -> &mut Self { self.assert_not_despawned(); let change_tick = self.world.change_tick(); @@ -1646,8 +1897,9 @@ impl<'w> EntityWorldMut<'w> { self.location, Some(component).into_iter(), Some(storage_type).iter().cloned(), - #[cfg(feature = "track_location")] + mode, caller, + relationship_hook_insert_mode, ); self.world.flush(); self.update_location(); @@ -1675,6 +1927,16 @@ impl<'w> EntityWorldMut<'w> { &mut self, component_ids: &[ComponentId], iter_components: I, + ) -> &mut Self { + self.insert_by_ids_internal(component_ids, iter_components, RelationshipHookMode::Run) + } + + #[track_caller] + pub(crate) unsafe fn insert_by_ids_internal<'a, I: Iterator>>( + &mut self, + component_ids: &[ComponentId], + iter_components: I, + relationship_hook_insert_mode: RelationshipHookMode, ) -> &mut Self { self.assert_not_despawned(); let change_tick = self.world.change_tick(); @@ -1698,8 +1960,9 @@ impl<'w> EntityWorldMut<'w> { self.location, iter_components, (*storage_types).iter().cloned(), - #[cfg(feature = "track_location")] - Location::caller(), + InsertMode::Replace, + MaybeLocation::caller(), + relationship_hook_insert_mode, ); *self.world.bundles.get_storages_unchecked(bundle_id) = core::mem::take(&mut storage_types); self.world.flush(); @@ -1722,8 +1985,10 @@ impl<'w> EntityWorldMut<'w> { self.assert_not_despawned(); let world = &mut self.world; let storages = &mut world.storages; - let components = &mut world.components; - let bundle_id = world.bundles.register_info::(components, storages); + // SAFETY: These come from the same world. 
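`insert_with_relationship_hook_mode` threads a `RelationshipHookMode` through the regular insertion path so cloning and spawning internals can decide whether relationship hooks run. A sketch of the difference, assuming the built-in `ChildOf`/`Children` relationship and the `Run`/`Skip` variants in `bevy_ecs::relationship`:

```rust
use bevy_ecs::{prelude::*, relationship::RelationshipHookMode};

fn main() {
    let mut world = World::new();
    let parent = world.spawn_empty().id();
    let child = world.spawn_empty().id();

    // A plain insert runs the relationship hooks, which keep the parent's
    // `Children` component in sync with this `ChildOf`.
    world.entity_mut(child).insert(ChildOf(parent));
    assert!(world.get::<Children>(parent).is_some());

    // Low-level code (cloning, scene spawning) can skip those hooks and
    // take responsibility for relationship integrity itself.
    let other = world.spawn_empty().id();
    world
        .entity_mut(other)
        .insert_with_relationship_hook_mode(ChildOf(parent), RelationshipHookMode::Skip);
}
```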
+ let mut registrator = + unsafe { ComponentsRegistrator::new(&mut world.components, &mut world.component_ids) }; + let bundle_id = world.bundles.register_info::(&mut registrator, storages); // SAFETY: We just ensured this bundle exists let bundle_info = unsafe { world.bundles.get_unchecked(bundle_id) }; let old_location = self.location; @@ -1733,7 +1998,7 @@ impl<'w> EntityWorldMut<'w> { bundle_info.remove_bundle_from_archetype( &mut world.archetypes, storages, - components, + ®istrator, &world.observers, old_location.archetype_id, false, @@ -1763,8 +2028,7 @@ impl<'w> EntityWorldMut<'w> { old_archetype, entity, bundle_info, - #[cfg(feature = "track_location")] - Location::caller(), + MaybeLocation::caller(), ); } @@ -1906,11 +2170,7 @@ impl<'w> EntityWorldMut<'w> { /// /// # Safety /// - A `BundleInfo` with the corresponding `BundleId` must have been initialized. - unsafe fn remove_bundle( - &mut self, - bundle: BundleId, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, - ) -> EntityLocation { + unsafe fn remove_bundle(&mut self, bundle: BundleId, caller: MaybeLocation) -> EntityLocation { let entity = self.entity; let world = &mut self.world; let location = self.location; @@ -1953,7 +2213,6 @@ impl<'w> EntityWorldMut<'w> { old_archetype, entity, bundle_info, - #[cfg(feature = "track_location")] caller, ); } @@ -2004,30 +2263,24 @@ impl<'w> EntityWorldMut<'w> { // TODO: BundleRemover? #[track_caller] pub fn remove(&mut self) -> &mut Self { - self.remove_with_caller::( - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.remove_with_caller::(MaybeLocation::caller()) } #[inline] - pub(crate) fn remove_with_caller( - &mut self, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, - ) -> &mut Self { + pub(crate) fn remove_with_caller(&mut self, caller: MaybeLocation) -> &mut Self { self.assert_not_despawned(); let storages = &mut self.world.storages; - let components = &mut self.world.components; - let bundle_info = self.world.bundles.register_info::(components, storages); + // SAFETY: These come from the same world. + let mut registrator = unsafe { + ComponentsRegistrator::new(&mut self.world.components, &mut self.world.component_ids) + }; + let bundle_info = self + .world + .bundles + .register_info::(&mut registrator, storages); // SAFETY: the `BundleInfo` is initialized above - self.location = unsafe { - self.remove_bundle( - bundle_info, - #[cfg(feature = "track_location")] - caller, - ) - }; + self.location = unsafe { self.remove_bundle(bundle_info, caller) }; self.world.flush(); self.update_location(); self @@ -2040,31 +2293,25 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[track_caller] pub fn remove_with_requires(&mut self) -> &mut Self { - self.remove_with_requires_with_caller::( - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.remove_with_requires_with_caller::(MaybeLocation::caller()) } pub(crate) fn remove_with_requires_with_caller( &mut self, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) -> &mut Self { self.assert_not_despawned(); let storages = &mut self.world.storages; - let components = &mut self.world.components; + // SAFETY: These come from the same world. 
+ let mut registrator = unsafe { + ComponentsRegistrator::new(&mut self.world.components, &mut self.world.component_ids) + }; let bundles = &mut self.world.bundles; - let bundle_id = bundles.register_contributed_bundle_info::(components, storages); + let bundle_id = bundles.register_contributed_bundle_info::(&mut registrator, storages); // SAFETY: the dynamic `BundleInfo` is initialized above - self.location = unsafe { - self.remove_bundle( - bundle_id, - #[cfg(feature = "track_location")] - caller, - ) - }; + self.location = unsafe { self.remove_bundle(bundle_id, caller) }; self.world.flush(); self.update_location(); self @@ -2079,23 +2326,23 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[track_caller] pub fn retain(&mut self) -> &mut Self { - self.retain_with_caller::( - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.retain_with_caller::(MaybeLocation::caller()) } #[inline] - pub(crate) fn retain_with_caller( - &mut self, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, - ) -> &mut Self { + pub(crate) fn retain_with_caller(&mut self, caller: MaybeLocation) -> &mut Self { self.assert_not_despawned(); let archetypes = &mut self.world.archetypes; let storages = &mut self.world.storages; - let components = &mut self.world.components; + // SAFETY: These come from the same world. + let mut registrator = unsafe { + ComponentsRegistrator::new(&mut self.world.components, &mut self.world.component_ids) + }; - let retained_bundle = self.world.bundles.register_info::(components, storages); + let retained_bundle = self + .world + .bundles + .register_info::(&mut registrator, storages); // SAFETY: `retained_bundle` exists as we just initialized it. let retained_bundle_info = unsafe { self.world.bundles.get_unchecked(retained_bundle) }; let old_location = self.location; @@ -2109,16 +2356,10 @@ impl<'w> EntityWorldMut<'w> { let remove_bundle = self.world .bundles - .init_dynamic_info(&mut self.world.storages, components, to_remove); + .init_dynamic_info(&mut self.world.storages, ®istrator, to_remove); // SAFETY: the `BundleInfo` for the components to remove is initialized above - self.location = unsafe { - self.remove_bundle( - remove_bundle, - #[cfg(feature = "track_location")] - caller, - ) - }; + self.location = unsafe { self.remove_bundle(remove_bundle, caller) }; self.world.flush(); self.update_location(); self @@ -2134,18 +2375,14 @@ impl<'w> EntityWorldMut<'w> { /// entity has been despawned while this `EntityWorldMut` is still alive. 
#[track_caller] pub fn remove_by_id(&mut self, component_id: ComponentId) -> &mut Self { - self.remove_by_id_with_caller( - component_id, - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.remove_by_id_with_caller(component_id, MaybeLocation::caller()) } #[inline] pub(crate) fn remove_by_id_with_caller( &mut self, component_id: ComponentId, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) -> &mut Self { self.assert_not_despawned(); let components = &mut self.world.components; @@ -2157,13 +2394,7 @@ impl<'w> EntityWorldMut<'w> { ); // SAFETY: the `BundleInfo` for this `component_id` is initialized above - self.location = unsafe { - self.remove_bundle( - bundle_id, - #[cfg(feature = "track_location")] - caller, - ) - }; + self.location = unsafe { self.remove_bundle(bundle_id, caller) }; self.world.flush(); self.update_location(); self @@ -2189,13 +2420,7 @@ impl<'w> EntityWorldMut<'w> { ); // SAFETY: the `BundleInfo` for this `bundle_id` is initialized above - unsafe { - self.remove_bundle( - bundle_id, - #[cfg(feature = "track_location")] - Location::caller(), - ) - }; + unsafe { self.remove_bundle(bundle_id, MaybeLocation::caller()) }; self.world.flush(); self.update_location(); @@ -2209,17 +2434,11 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[track_caller] pub fn clear(&mut self) -> &mut Self { - self.clear_with_caller( - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.clear_with_caller(MaybeLocation::caller()) } #[inline] - pub(crate) fn clear_with_caller( - &mut self, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, - ) -> &mut Self { + pub(crate) fn clear_with_caller(&mut self, caller: MaybeLocation) -> &mut Self { self.assert_not_despawned(); let component_ids: Vec = self.archetype().components().collect(); let components = &mut self.world.components; @@ -2231,13 +2450,7 @@ impl<'w> EntityWorldMut<'w> { ); // SAFETY: the `BundleInfo` for this `component_id` is initialized above - self.location = unsafe { - self.remove_bundle( - bundle_id, - #[cfg(feature = "track_location")] - caller, - ) - }; + self.location = unsafe { self.remove_bundle(bundle_id, caller) }; self.world.flush(); self.update_location(); self @@ -2257,10 +2470,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[track_caller] pub fn despawn(self) { - self.despawn_with_caller( - #[cfg(feature = "track_location")] - Location::caller(), - ); + self.despawn_with_caller(MaybeLocation::caller()); } /// Despawns the provided entity and its descendants. 
@@ -2272,10 +2482,7 @@ impl<'w> EntityWorldMut<'w> { self.despawn(); } - pub(crate) fn despawn_with_caller( - self, - #[cfg(feature = "track_location")] caller: &'static Location, - ) { + pub(crate) fn despawn_with_caller(self, caller: MaybeLocation) { self.assert_not_despawned(); let world = self.world; let archetype = &world.archetypes[self.location.archetype_id]; @@ -2294,7 +2501,6 @@ impl<'w> EntityWorldMut<'w> { ON_DESPAWN, self.entity, archetype.components(), - #[cfg(feature = "track_location")] caller, ); } @@ -2302,7 +2508,6 @@ impl<'w> EntityWorldMut<'w> { archetype, self.entity, archetype.components(), - #[cfg(feature = "track_location")] caller, ); if archetype.has_replace_observer() { @@ -2310,7 +2515,6 @@ impl<'w> EntityWorldMut<'w> { ON_REPLACE, self.entity, archetype.components(), - #[cfg(feature = "track_location")] caller, ); } @@ -2318,15 +2522,14 @@ impl<'w> EntityWorldMut<'w> { archetype, self.entity, archetype.components(), - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ); if archetype.has_remove_observer() { deferred_world.trigger_observers( ON_REMOVE, self.entity, archetype.components(), - #[cfg(feature = "track_location")] caller, ); } @@ -2334,7 +2537,6 @@ impl<'w> EntityWorldMut<'w> { archetype, self.entity, archetype.components(), - #[cfg(feature = "track_location")] caller, ); } @@ -2406,14 +2608,11 @@ impl<'w> EntityWorldMut<'w> { } world.flush(); - #[cfg(feature = "track_location")] - { - // SAFETY: No structural changes - unsafe { - world - .entities_mut() - .set_spawned_or_despawned_by(self.entity.index(), caller); - } + // SAFETY: No structural changes + unsafe { + world + .entities_mut() + .set_spawned_or_despawned_by(self.entity.index(), caller); } } @@ -2531,11 +2730,11 @@ impl<'w> EntityWorldMut<'w> { /// let mut entity = world.spawn_empty(); /// entity.entry().or_insert_with(|| Comp(4)); /// # let entity_id = entity.id(); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 4); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 4); /// /// # let mut entity = world.get_entity_mut(entity_id).unwrap(); /// entity.entry::().and_modify(|mut c| c.0 += 1); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 5); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 5); /// ``` /// /// # Panics @@ -2579,24 +2778,17 @@ impl<'w> EntityWorldMut<'w> { &mut self, observer: impl IntoObserverSystem, ) -> &mut Self { - self.observe_with_caller( - observer, - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.observe_with_caller(observer, MaybeLocation::caller()) } pub(crate) fn observe_with_caller( &mut self, observer: impl IntoObserverSystem, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) -> &mut Self { self.assert_not_despawned(); - self.world.spawn_with_caller( - Observer::new(observer).with_entity(self.entity), - #[cfg(feature = "track_location")] - caller, - ); + self.world + .spawn_with_caller(Observer::new(observer).with_entity(self.entity), caller); self.world.flush(); self.update_location(); self @@ -2754,12 +2946,11 @@ impl<'w> EntityWorldMut<'w> { } /// Returns the source code location from which this entity has last been spawned. 
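From here on, the `#[cfg(feature = "track_location")] &'static Location` plumbing is folded into `MaybeLocation`, which carries a `Location` when the feature is enabled and compiles down to a zero-sized no-op otherwise. A small sketch of how the new `spawned_by` return type reads at a call site (`into_option` is the accessor I would expect on `MaybeLocation`; treat it as an assumption):

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Marker;

fn main() {
    let mut world = World::new();
    let entity = world.spawn(Marker).id();

    // `spawned_by` now exists unconditionally; it only carries data when
    // the `track_location` cargo feature is enabled.
    let spawned_by = world.entity(entity).spawned_by();
    match spawned_by.into_option() {
        Some(location) => println!("spawned at {location}"),
        None => println!("compiled without `track_location`"),
    }
}
```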
- #[cfg(feature = "track_location")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.world() .entities() .entity_get_spawned_or_despawned_by(self.entity) - .unwrap() + .map(|location| location.unwrap()) } } @@ -2770,40 +2961,37 @@ unsafe fn trigger_on_replace_and_on_remove_hooks_and_observers( archetype: &Archetype, entity: Entity, bundle_info: &BundleInfo, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { + let bundle_components_in_archetype = || { + bundle_info + .iter_explicit_components() + .filter(|component_id| archetype.contains(*component_id)) + }; if archetype.has_replace_observer() { deferred_world.trigger_observers( ON_REPLACE, entity, - bundle_info.iter_explicit_components(), - #[cfg(feature = "track_location")] + bundle_components_in_archetype(), caller, ); } deferred_world.trigger_on_replace( archetype, entity, - bundle_info.iter_explicit_components(), - #[cfg(feature = "track_location")] + bundle_components_in_archetype(), caller, + RelationshipHookMode::Run, ); if archetype.has_remove_observer() { deferred_world.trigger_observers( ON_REMOVE, entity, - bundle_info.iter_explicit_components(), - #[cfg(feature = "track_location")] + bundle_components_in_archetype(), caller, ); } - deferred_world.trigger_on_remove( - archetype, - entity, - bundle_info.iter_explicit_components(), - #[cfg(feature = "track_location")] - caller, - ); + deferred_world.trigger_on_remove(archetype, entity, bundle_components_in_archetype(), caller); } /// A view into a single entity and component in a world, which may either be vacant or occupied. @@ -2832,7 +3020,7 @@ impl<'w, 'a, T: Component> Entry<'w, 'a, T> { /// let mut entity = world.spawn(Comp(0)); /// /// entity.entry::().and_modify(|mut c| c.0 += 1); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 1); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 1); /// ``` #[inline] pub fn and_modify)>(self, f: F) -> Self { @@ -2891,11 +3079,11 @@ impl<'w, 'a, T: Component> Entry<'w, 'a, T> { /// /// entity.entry().or_insert(Comp(4)); /// # let entity_id = entity.id(); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 4); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 4); /// /// # let mut entity = world.get_entity_mut(entity_id).unwrap(); /// entity.entry().or_insert(Comp(15)).into_mut().0 *= 2; - /// assert_eq!(world.query::<&Comp>().single(&world).0, 8); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 8); /// ``` #[inline] pub fn or_insert(self, default: T) -> OccupiedEntry<'w, 'a, T> { @@ -2919,7 +3107,7 @@ impl<'w, 'a, T: Component> Entry<'w, 'a, T> { /// let mut entity = world.spawn_empty(); /// /// entity.entry().or_insert_with(|| Comp(4)); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 4); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 4); /// ``` #[inline] pub fn or_insert_with T>(self, default: F) -> OccupiedEntry<'w, 'a, T> { @@ -2945,7 +3133,7 @@ impl<'w, 'a, T: Component + Default> Entry<'w, 'a, T> { /// let mut entity = world.spawn_empty(); /// /// entity.entry::().or_default(); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 0); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 0); /// ``` #[inline] pub fn or_default(self) -> OccupiedEntry<'w, 'a, T> { @@ -3003,7 +3191,7 @@ impl<'w, 'a, T: Component> OccupiedEntry<'w, 'a, T> { /// o.insert(Comp(10)); /// } /// - /// 
assert_eq!(world.query::<&Comp>().single(&world).0, 10); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 10); /// ``` #[inline] pub fn insert(&mut self, component: T) { @@ -3061,7 +3249,7 @@ impl<'w, 'a, T: Component> OccupiedEntry<'w, 'a, T> { /// o.get_mut().0 += 2 /// } /// - /// assert_eq!(world.query::<&Comp>().single(&world).0, 17); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 17); /// ``` #[inline] pub fn get_mut(&mut self) -> Mut<'_, T> { @@ -3090,7 +3278,7 @@ impl<'w, 'a, T: Component> OccupiedEntry<'w, 'a, T> { /// o.into_mut().0 += 10; /// } /// - /// assert_eq!(world.query::<&Comp>().single(&world).0, 15); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 15); /// ``` #[inline] pub fn into_mut(self) -> Mut<'a, T> { @@ -3122,7 +3310,7 @@ impl<'w, 'a, T: Component> VacantEntry<'w, 'a, T> { /// v.insert(Comp(10)); /// } /// - /// assert_eq!(world.query::<&Comp>().single(&world).0, 10); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 10); /// ``` #[inline] pub fn insert(self, component: T) -> OccupiedEntry<'w, 'a, T> { @@ -3154,7 +3342,7 @@ impl<'w, 'a, T: Component> VacantEntry<'w, 'a, T> { /// .data::<&A>() /// .build(); /// -/// let filtered_entity: FilteredEntityRef = query.single(&mut world); +/// let filtered_entity: FilteredEntityRef = query.single(&mut world).unwrap(); /// let component: &A = filtered_entity.get().unwrap(); /// /// // Here `FilteredEntityRef` is nested in a tuple, so it does not have access to `&A`. @@ -3162,7 +3350,7 @@ impl<'w, 'a, T: Component> VacantEntry<'w, 'a, T> { /// .data::<&A>() /// .build(); /// -/// let (_, filtered_entity) = query.single(&mut world); +/// let (_, filtered_entity) = query.single(&mut world).unwrap(); /// assert!(filtered_entity.get::
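The doc-test churn in these hunks (`.single(&world)` becoming `.single(&world).unwrap()`) reflects `QueryState::single` now returning a `Result` instead of panicking. A minimal sketch of the new calling convention (assuming the error type is the existing `QuerySingleError`):

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Score(u32);

fn main() {
    let mut world = World::new();
    world.spawn(Score(7));

    let mut query = world.query::<&Score>();

    // Zero or multiple matches are now reported as an Err value rather
    // than a panic, so callers choose how to handle them.
    match query.single(&world) {
        Ok(score) => assert_eq!(score.0, 7),
        Err(err) => eprintln!("expected exactly one Score: {err}"),
    }
}
```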
().is_none()); /// ``` #[derive(Clone)] @@ -3175,7 +3363,7 @@ impl<'w> FilteredEntityRef<'w> { /// # Safety /// - No `&mut World` can exist from the underlying `UnsafeWorldCell` /// - If `access` takes read access to a component no mutable reference to that - /// component can exist at the same time as the returned [`FilteredEntityMut`] + /// component can exist at the same time as the returned [`FilteredEntityMut`] /// - If `access` takes any access for a component `entity` must have that component. #[inline] pub(crate) unsafe fn new(entity: UnsafeEntityCell<'w>, access: Access) -> Self { @@ -3226,7 +3414,7 @@ impl<'w> FilteredEntityRef<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { self.entity.contains_id(component_id) @@ -3315,8 +3503,7 @@ impl<'w> FilteredEntityRef<'w> { } /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_location")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.entity.spawned_by() } } @@ -3411,6 +3598,24 @@ impl<'a> From<&'a EntityWorldMut<'_>> for FilteredEntityRef<'a> { } } +impl<'a, B: Bundle> From<&'a EntityRefExcept<'_, B>> for FilteredEntityRef<'a> { + fn from(value: &'a EntityRefExcept<'_, B>) -> Self { + // SAFETY: + // - The FilteredEntityRef has the same component access as the given EntityRefExcept. + unsafe { + let mut access = Access::default(); + access.read_all(); + let components = value.entity.world().components(); + B::get_component_ids(components, &mut |maybe_id| { + if let Some(id) = maybe_id { + access.remove_component_read(id); + } + }); + FilteredEntityRef::new(value.entity, access) + } + } +} + impl PartialEq for FilteredEntityRef<'_> { fn eq(&self, other: &Self) -> bool { self.entity() == other.entity() @@ -3439,14 +3644,14 @@ impl Hash for FilteredEntityRef<'_> { } } -impl EntityBorrow for FilteredEntityRef<'_> { +impl ContainsEntity for FilteredEntityRef<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for FilteredEntityRef<'_> {} +unsafe impl EntityEquivalent for FilteredEntityRef<'_> {} /// Provides mutable access to a single entity and some of its components defined by the contained [`Access`]. /// @@ -3468,7 +3673,7 @@ unsafe impl TrustedEntityBorrow for FilteredEntityRef<'_> {} /// .data::<&mut A>() /// .build(); /// -/// let mut filtered_entity: FilteredEntityMut = query.single_mut(&mut world); +/// let mut filtered_entity: FilteredEntityMut = query.single_mut(&mut world).unwrap(); /// let component: Mut = filtered_entity.get_mut().unwrap(); /// /// // Here `FilteredEntityMut` is nested in a tuple, so it does not have access to `&mut A`. 
@@ -3476,7 +3681,7 @@ unsafe impl TrustedEntityBorrow for FilteredEntityRef<'_> {} /// .data::<&mut A>() /// .build(); /// -/// let (_, mut filtered_entity) = query.single_mut(&mut world); +/// let (_, mut filtered_entity) = query.single_mut(&mut world).unwrap(); /// assert!(filtered_entity.get_mut::().is_none()); /// ``` pub struct FilteredEntityMut<'w> { @@ -3488,9 +3693,9 @@ impl<'w> FilteredEntityMut<'w> { /// # Safety /// - No `&mut World` can exist from the underlying `UnsafeWorldCell` /// - If `access` takes read access to a component no mutable reference to that - /// component can exist at the same time as the returned [`FilteredEntityMut`] + /// component can exist at the same time as the returned [`FilteredEntityMut`] /// - If `access` takes write access to a component, no reference to that component - /// may exist at the same time as the returned [`FilteredEntityMut`] + /// may exist at the same time as the returned [`FilteredEntityMut`] /// - If `access` takes any access for a component `entity` must have that component. #[inline] pub(crate) unsafe fn new(entity: UnsafeEntityCell<'w>, access: Access) -> Self { @@ -3554,7 +3759,7 @@ impl<'w> FilteredEntityMut<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { self.entity.contains_id(component_id) @@ -3679,8 +3884,7 @@ impl<'w> FilteredEntityMut<'w> { } /// Returns the source code location from which this entity has last been spawned. - #[cfg(feature = "track_location")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.entity.spawned_by() } } @@ -3737,6 +3941,24 @@ impl<'a> From<&'a mut EntityWorldMut<'_>> for FilteredEntityMut<'a> { } } +impl<'a, B: Bundle> From<&'a EntityMutExcept<'_, B>> for FilteredEntityMut<'a> { + fn from(value: &'a EntityMutExcept<'_, B>) -> Self { + // SAFETY: + // - The FilteredEntityMut has the same component access as the given EntityMutExcept. + unsafe { + let mut access = Access::default(); + access.write_all(); + let components = value.entity.world().components(); + B::get_component_ids(components, &mut |maybe_id| { + if let Some(id) = maybe_id { + access.remove_component_read(id); + } + }); + FilteredEntityMut::new(value.entity, access) + } + } +} + impl PartialEq for FilteredEntityMut<'_> { fn eq(&self, other: &Self) -> bool { self.entity() == other.entity() @@ -3765,14 +3987,14 @@ impl Hash for FilteredEntityMut<'_> { } } -impl EntityBorrow for FilteredEntityMut<'_> { +impl ContainsEntity for FilteredEntityMut<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for FilteredEntityMut<'_> {} +unsafe impl EntityEquivalent for FilteredEntityMut<'_> {} /// Error type returned by [`TryFrom`] conversions from filtered entity types /// ([`FilteredEntityRef`]/[`FilteredEntityMut`]) to full-access entity types @@ -3859,10 +4081,96 @@ where } /// Returns the source code location from which this entity has been spawned. 
- #[cfg(feature = "track_location")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.entity.spawned_by() } + + /// Gets the component of the given [`ComponentId`] from the entity. + /// + /// **You should prefer to use the typed API [`Self::get`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityRefExcept::get`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityRefExcept`] is alive. + #[inline] + pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + let components = self.entity.world().components(); + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have read access for this component + unsafe { self.entity.get_by_id(component_id) } + }) + .flatten() + } + + /// Returns `true` if the current entity has a component of type `T`. + /// Otherwise, this returns `false`. + /// + /// ## Notes + /// + /// If you do not know the concrete type of a component, consider using + /// [`Self::contains_id`] or [`Self::contains_type_id`]. + #[inline] + pub fn contains(&self) -> bool { + self.contains_type_id(TypeId::of::()) + } + + /// Returns `true` if the current entity has a component identified by `component_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. + /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using + /// [`Self::contains_type_id`]. + #[inline] + pub fn contains_id(&self, component_id: ComponentId) -> bool { + self.entity.contains_id(component_id) + } + + /// Returns `true` if the current entity has a component with the type identified by `type_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. + /// - If you have a [`ComponentId`] instead of a [`TypeId`], consider using [`Self::contains_id`]. + #[inline] + pub fn contains_type_id(&self, type_id: TypeId) -> bool { + self.entity.contains_type_id(type_id) + } + + /// Retrieves the change ticks for the given component. This can be useful for implementing change + /// detection in custom runtimes. + #[inline] + pub fn get_change_ticks(&self) -> Option { + let component_id = self.entity.world().components().get_id(TypeId::of::())?; + let components = self.entity.world().components(); + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have read access + unsafe { self.entity.get_change_ticks::() } + }) + .flatten() + } + + /// Retrieves the change ticks for the given [`ComponentId`]. This can be useful for implementing change + /// detection in custom runtimes. 
+ /// + /// **You should prefer to use the typed API [`Self::get_change_ticks`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + #[inline] + pub fn get_change_ticks_by_id(&self, component_id: ComponentId) -> Option { + let components = self.entity.world().components(); + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have read access + unsafe { self.entity.get_change_ticks_by_id(component_id) } + }) + .flatten() + } } impl<'a, B> From<&'a EntityMutExcept<'_, B>> for EntityRefExcept<'a, B> @@ -3912,14 +4220,14 @@ impl Hash for EntityRefExcept<'_, B> { } } -impl EntityBorrow for EntityRefExcept<'_, B> { +impl ContainsEntity for EntityRefExcept<'_, B> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for EntityRefExcept<'_, B> {} +unsafe impl EntityEquivalent for EntityRefExcept<'_, B> {} /// Provides mutable access to all components of an entity, with the exception /// of an explicit set. @@ -4017,10 +4325,81 @@ where } /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_location")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.entity.spawned_by() } + + /// Returns `true` if the current entity has a component of type `T`. + /// Otherwise, this returns `false`. + /// + /// ## Notes + /// + /// If you do not know the concrete type of a component, consider using + /// [`Self::contains_id`] or [`Self::contains_type_id`]. + #[inline] + pub fn contains(&self) -> bool { + self.contains_type_id(TypeId::of::()) + } + + /// Returns `true` if the current entity has a component identified by `component_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. + /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using + /// [`Self::contains_type_id`]. + #[inline] + pub fn contains_id(&self, component_id: ComponentId) -> bool { + self.entity.contains_id(component_id) + } + + /// Returns `true` if the current entity has a component with the type identified by `type_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. + /// - If you have a [`ComponentId`] instead of a [`TypeId`], consider using [`Self::contains_id`]. + #[inline] + pub fn contains_type_id(&self, type_id: TypeId) -> bool { + self.entity.contains_type_id(type_id) + } + + /// Gets the component of the given [`ComponentId`] from the entity. + /// + /// **You should prefer to use the typed API [`Self::get`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityMutExcept::get`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityMutExcept`] is alive. + #[inline] + pub fn get_by_id(&'w self, component_id: ComponentId) -> Option> { + self.as_readonly().get_by_id(component_id) + } + + /// Gets a [`MutUntyped`] of the component of the given [`ComponentId`] from the entity. 
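`EntityRefExcept` and `EntityMutExcept` gain the same `contains`/`contains_id`/`get_by_id`/change-tick helpers the other entity views already expose. A rough usage sketch, assuming these types are still usable as query data and re-exported from `bevy_ecs::world` as in earlier releases:

```rust
use bevy_ecs::{prelude::*, system::RunSystemOnce, world::EntityMutExcept};

#[derive(Component)]
struct Locked;

#[derive(Component)]
struct Value(u32);

// Mutable access to everything on the entity except `Locked`.
fn bump_unlocked(mut query: Query<EntityMutExcept<Locked>>) {
    for mut entity in &mut query {
        // The new `contains` helper still answers presence questions for
        // components outside this view's access.
        if entity.contains::<Locked>() {
            continue;
        }
        if let Some(mut value) = entity.get_mut::<Value>() {
            value.0 += 1;
        }
    }
}

fn main() {
    let mut world = World::new();
    let free = world.spawn(Value(0)).id();
    let locked = world.spawn((Value(0), Locked)).id();

    world.run_system_once(bump_unlocked).unwrap();

    assert_eq!(world.get::<Value>(free).map(|v| v.0), Some(1));
    assert_eq!(world.get::<Value>(locked).map(|v| v.0), Some(0));
}
```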
+ /// + /// **You should prefer to use the typed API [`Self::get_mut`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityMutExcept::get_mut`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityMutExcept`] is alive. + #[inline] + pub fn get_mut_by_id( + &mut self, + component_id: ComponentId, + ) -> Option> { + let components = self.entity.world().components(); + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have write access + unsafe { self.entity.get_mut_by_id(component_id).ok() } + }) + .flatten() + } } impl PartialEq for EntityMutExcept<'_, B> { @@ -4051,14 +4430,14 @@ impl Hash for EntityMutExcept<'_, B> { } } -impl EntityBorrow for EntityMutExcept<'_, B> { +impl ContainsEntity for EntityMutExcept<'_, B> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for EntityMutExcept<'_, B> {} +unsafe impl EntityEquivalent for EntityMutExcept<'_, B> {} fn bundle_contains_component(components: &Components, query_id: ComponentId) -> bool where @@ -4078,7 +4457,7 @@ where /// # Safety /// /// - [`OwningPtr`] and [`StorageType`] iterators must correspond to the -/// [`BundleInfo`] used to construct [`BundleInserter`] +/// [`BundleInfo`] used to construct [`BundleInserter`] /// - [`Entity`] must correspond to [`EntityLocation`] unsafe fn insert_dynamic_bundle< 'a, @@ -4090,7 +4469,9 @@ unsafe fn insert_dynamic_bundle< location: EntityLocation, components: I, storage_types: S, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + mode: InsertMode, + caller: MaybeLocation, + relationship_hook_insert_mode: RelationshipHookMode, ) -> EntityLocation { struct DynamicInsertBundle<'a, I: Iterator)>> { components: I, @@ -4116,9 +4497,9 @@ unsafe fn insert_dynamic_bundle< entity, location, bundle, - InsertMode::Replace, - #[cfg(feature = "track_location")] + mode, caller, + relationship_hook_insert_mode, ) .0 } @@ -4232,6 +4613,26 @@ pub unsafe trait DynamicComponentFetch { self, cell: UnsafeEntityCell<'_>, ) -> Result, EntityComponentError>; + + /// Returns untyped mutable reference(s) to the component(s) with the + /// given [`ComponentId`]s, as determined by `self`. + /// Assumes all [`ComponentId`]s refer to mutable components. + /// + /// # Safety + /// + /// It is the caller's responsibility to ensure that: + /// - The given [`UnsafeEntityCell`] has mutable access to the fetched components. + /// - No other references to the fetched components exist at the same time. + /// - The requested components are all mutable. + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if a component is missing from the entity. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component is requested multiple times. + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError>; } // SAFETY: @@ -4257,6 +4658,15 @@ unsafe impl DynamicComponentFetch for ComponentId { unsafe { cell.get_mut_by_id(self) } .map_err(|_| EntityComponentError::MissingComponent(self)) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + // SAFETY: caller ensures that the cell has mutable access to the component. 
+ unsafe { cell.get_mut_assume_mutable_by_id(self) } + .map_err(|_| EntityComponentError::MissingComponent(self)) + } } // SAFETY: @@ -4279,6 +4689,13 @@ unsafe impl DynamicComponentFetch for [ComponentId; N] { ) -> Result, EntityComponentError> { <&Self>::fetch_mut(&self, cell) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + <&Self>::fetch_mut_assume_mutable(&self, cell) + } } // SAFETY: @@ -4333,6 +4750,34 @@ unsafe impl DynamicComponentFetch for &'_ [ComponentId; N] { Ok(ptrs) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + // Check for duplicate component IDs. + for i in 0..self.len() { + for j in 0..i { + if self[i] == self[j] { + return Err(EntityComponentError::AliasedMutability(self[i])); + } + } + } + + let mut ptrs = [const { MaybeUninit::uninit() }; N]; + for (ptr, &id) in core::iter::zip(&mut ptrs, self) { + *ptr = MaybeUninit::new( + // SAFETY: caller ensures that the cell has mutable access to the component. + unsafe { cell.get_mut_assume_mutable_by_id(id) } + .map_err(|_| EntityComponentError::MissingComponent(id))?, + ); + } + + // SAFETY: Each ptr was initialized in the loop above. + let ptrs = ptrs.map(|ptr| unsafe { MaybeUninit::assume_init(ptr) }); + + Ok(ptrs) + } } // SAFETY: @@ -4379,6 +4824,30 @@ unsafe impl DynamicComponentFetch for &'_ [ComponentId] { } Ok(ptrs) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + // Check for duplicate component IDs. + for i in 0..self.len() { + for j in 0..i { + if self[i] == self[j] { + return Err(EntityComponentError::AliasedMutability(self[i])); + } + } + } + + let mut ptrs = Vec::with_capacity(self.len()); + for &id in self { + ptrs.push( + // SAFETY: caller ensures that the cell has mutable access to the component. + unsafe { cell.get_mut_assume_mutable_by_id(id) } + .map_err(|_| EntityComponentError::MissingComponent(id))?, + ); + } + Ok(ptrs) + } } // SAFETY: @@ -4418,6 +4887,22 @@ unsafe impl DynamicComponentFetch for &'_ HashSet { } Ok(ptrs) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + let mut ptrs = HashMap::with_capacity_and_hasher(self.len(), Default::default()); + for &id in self { + ptrs.insert( + id, + // SAFETY: caller ensures that the cell has mutable access to the component. 
+ unsafe { cell.get_mut_assume_mutable_by_id(id) } + .map_err(|_| EntityComponentError::MissingComponent(id))?, + ); + } + Ok(ptrs) + } } #[cfg(test)] @@ -4425,13 +4910,11 @@ mod tests { use alloc::{vec, vec::Vec}; use bevy_ptr::{OwningPtr, Ptr}; use core::panic::AssertUnwindSafe; - - #[cfg(feature = "track_location")] - use {core::panic::Location, std::sync::OnceLock}; + use std::sync::OnceLock; use crate::component::HookContext; use crate::{ - change_detection::MutUntyped, + change_detection::{MaybeLocation, MutUntyped}, component::ComponentId, prelude::*, system::{assert_is_system, RunSystemOnce as _}, @@ -5674,7 +6157,7 @@ mod tests { struct A; #[derive(Component, Clone, PartialEq, Debug, Default)] - #[require(C(|| C(3)))] + #[require(C(3))] struct B; #[derive(Component, Clone, PartialEq, Debug, Default)] @@ -5710,7 +6193,6 @@ mod tests { } #[test] - #[cfg(feature = "track_location")] fn update_despawned_by_after_observers() { let mut world = World::new(); @@ -5718,19 +6200,19 @@ mod tests { #[component(on_remove = get_tracked)] struct C; - static TRACKED: OnceLock<&'static Location<'static>> = OnceLock::new(); + static TRACKED: OnceLock = OnceLock::new(); fn get_tracked(world: DeferredWorld, HookContext { entity, .. }: HookContext) { TRACKED.get_or_init(|| { world .entities .entity_get_spawned_or_despawned_by(entity) - .unwrap() + .map(|l| l.unwrap()) }); } #[track_caller] - fn caller_spawn(world: &mut World) -> (Entity, &'static Location<'static>) { - let caller = Location::caller(); + fn caller_spawn(world: &mut World) -> (Entity, MaybeLocation) { + let caller = MaybeLocation::caller(); (world.spawn(C).id(), caller) } let (entity, spawner) = caller_spawn(&mut world); @@ -5740,13 +6222,13 @@ mod tests { world .entities() .entity_get_spawned_or_despawned_by(entity) - .unwrap() + .map(|l| l.unwrap()) ); #[track_caller] - fn caller_despawn(world: &mut World, entity: Entity) -> &'static Location<'static> { + fn caller_despawn(world: &mut World, entity: Entity) -> MaybeLocation { world.despawn(entity); - Location::caller() + MaybeLocation::caller() } let despawner = caller_despawn(&mut world, entity); @@ -5756,7 +6238,7 @@ mod tests { world .entities() .entity_get_spawned_or_despawned_by(entity) - .unwrap() + .map(|l| l.unwrap()) ); } @@ -5842,4 +6324,42 @@ mod tests { assert_eq!(archetype_pointer_before, archetype_pointer_after); } + + #[test] + fn bundle_remove_only_triggers_for_present_components() { + let mut world = World::default(); + + #[derive(Component)] + struct A; + + #[derive(Component)] + struct B; + + #[derive(Resource, PartialEq, Eq, Debug)] + struct Tracker { + a: bool, + b: bool, + } + + world.insert_resource(Tracker { a: false, b: false }); + let entity = world.spawn(A).id(); + + world.add_observer(|_: Trigger, mut tracker: ResMut| { + tracker.a = true; + }); + world.add_observer(|_: Trigger, mut tracker: ResMut| { + tracker.b = true; + }); + + world.entity_mut(entity).remove::<(A, B)>(); + + assert_eq!( + world.resource::(), + &Tracker { + a: true, + // The entity didn't have a B component, so it should not have been triggered. + b: false, + } + ); + } } diff --git a/crates/bevy_ecs/src/world/error.rs b/crates/bevy_ecs/src/world/error.rs index 7609b15e01..3527967942 100644 --- a/crates/bevy_ecs/src/world/error.rs +++ b/crates/bevy_ecs/src/world/error.rs @@ -1,39 +1,26 @@ //! Contains error types returned by bevy's schedule. 
use alloc::vec::Vec; -use thiserror::Error; use crate::{ component::ComponentId, - entity::{Entity, EntityDoesNotExistDetails}, + entity::{Entity, EntityDoesNotExistError}, schedule::InternedScheduleLabel, }; /// The error type returned by [`World::try_run_schedule`] if the provided schedule does not exist. /// /// [`World::try_run_schedule`]: crate::world::World::try_run_schedule -#[derive(Error, Debug)] +#[derive(thiserror::Error, Debug)] #[error("The schedule with the label {0:?} was not found.")] pub struct TryRunScheduleError(pub InternedScheduleLabel); -/// The error type returned by [`World::try_despawn`] if the provided entity does not exist. -/// -/// [`World::try_despawn`]: crate::world::World::try_despawn -#[derive(Error, Debug, Clone, Copy)] -#[error("Could not despawn the entity with ID {entity} because it {details}")] -pub struct TryDespawnError { - /// The entity's ID. - pub entity: Entity, - /// Details on why the entity does not exist, if available. - pub details: EntityDoesNotExistDetails, -} - /// The error type returned by [`World::try_insert_batch`] and [`World::try_insert_batch_if_new`] /// if any of the provided entities do not exist. /// /// [`World::try_insert_batch`]: crate::world::World::try_insert_batch /// [`World::try_insert_batch_if_new`]: crate::world::World::try_insert_batch_if_new -#[derive(Error, Debug, Clone)] +#[derive(thiserror::Error, Debug, Clone)] #[error("Could not insert bundles of type {bundle_type} into the entities with the following IDs because they do not exist: {entities:?}")] pub struct TryInsertBatchError { /// The bundles' type name. @@ -42,8 +29,13 @@ pub struct TryInsertBatchError { pub entities: Vec, } +/// An error that occurs when a specified [`Entity`] could not be despawned. +#[derive(thiserror::Error, Debug, Clone, Copy)] +#[error("Could not despawn entity: {0}")] +pub struct EntityDespawnError(#[from] pub EntityMutableFetchError); + /// An error that occurs when dynamically retrieving components from an entity. -#[derive(Error, Debug, Clone, Copy, PartialEq, Eq)] +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] pub enum EntityComponentError { /// The component with the given [`ComponentId`] does not exist on the entity. #[error("The component with ID {0:?} does not exist on the entity.")] @@ -54,24 +46,26 @@ pub enum EntityComponentError { } /// An error that occurs when fetching entities mutably from a world. -#[derive(Error, Debug, Clone, Copy)] -pub enum EntityFetchError { +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] +pub enum EntityMutableFetchError { /// The entity with the given ID does not exist. - #[error("The entity with ID {0} {1}")] - NoSuchEntity(Entity, EntityDoesNotExistDetails), + #[error(transparent)] + EntityDoesNotExist(#[from] EntityDoesNotExistError), /// The entity with the given ID was requested mutably more than once. #[error("The entity with ID {0} was requested mutably more than once")] AliasedMutability(Entity), } -impl PartialEq for EntityFetchError { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::NoSuchEntity(e1, _), Self::NoSuchEntity(e2, _)) if e1 == e2 => true, - (Self::AliasedMutability(e1), Self::AliasedMutability(e2)) if e1 == e2 => true, - _ => false, - } - } +/// An error that occurs when getting a resource of a given type in a world. +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] +pub enum ResourceFetchError { + /// The resource has never been initialized or registered with the world. 
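In the reworked `error.rs`, `EntityDespawnError` simply wraps the new `EntityMutableFetchError` via `#[from]`, which in turn wraps `EntityDoesNotExistError`, so fallible entity operations compose with `?`. A hedged sketch (the `try_despawn` signature is inferred from the error type's doc comment and is not shown in this diff):

```rust
use bevy_ecs::{prelude::*, world::error::EntityDespawnError};

fn despawn_checked(world: &mut World, entity: Entity) -> Result<(), EntityDespawnError> {
    // Reports a wrapped EntityDoesNotExistError instead of warning or
    // panicking when the entity is already gone.
    world.try_despawn(entity)?;
    Ok(())
}

fn main() {
    let mut world = World::new();
    let entity = world.spawn_empty().id();

    assert!(despawn_checked(&mut world, entity).is_ok());
    // The second attempt fails: the entity no longer exists.
    assert!(despawn_checked(&mut world, entity).is_err());
}
```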
+ #[error("The resource has never been initialized or registered with the world. Did you forget to add it using `app.insert_resource` / `app.init_resource`?")] + NotRegistered, + /// The resource with the given [`ComponentId`] does not currently exist in the world. + #[error("The resource with ID {0:?} does not currently exist in the world.")] + DoesNotExist(ComponentId), + /// Cannot get access to the resource with the given [`ComponentId`] in the world as it conflicts with an on going operation. + #[error("Cannot get access to the resource with ID {0:?} in the world as it conflicts with an on going operation.")] + NoResourceAccess(ComponentId), } - -impl Eq for EntityFetchError {} diff --git a/crates/bevy_ecs/src/world/filtered_resource.rs b/crates/bevy_ecs/src/world/filtered_resource.rs index 78794866e8..a9fac308fa 100644 --- a/crates/bevy_ecs/src/world/filtered_resource.rs +++ b/crates/bevy_ecs/src/world/filtered_resource.rs @@ -5,9 +5,9 @@ use crate::{ resource::Resource, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; -use bevy_ptr::Ptr; -#[cfg(feature = "track_location")] -use bevy_ptr::UnsafeCellDeref; +use bevy_ptr::{Ptr, UnsafeCellDeref}; + +use super::error::ResourceFetchError; /// Provides read-only access to a set of [`Resource`]s defined by the contained [`Access`]. /// @@ -44,9 +44,9 @@ use bevy_ptr::UnsafeCellDeref; /// /// fn resource_system(res: FilteredResources) { /// // The resource exists, but we have no access, so we can't read it. -/// assert!(res.get::().is_none()); +/// assert!(res.get::().is_err()); /// // The resource doesn't exist, so we can't read it. -/// assert!(res.get::().is_none()); +/// assert!(res.get::().is_err()); /// // The resource exists and we have access, so we can read it. /// let c = res.get::().unwrap(); /// // The type parameter can be left out if it can be determined from use. @@ -146,39 +146,45 @@ impl<'w, 's> FilteredResources<'w, 's> { } /// Returns `true` if the `FilteredResources` has access to the given resource. - /// Note that [`Self::get()`] may still return `None` if the resource does not exist. + /// Note that [`Self::get()`] may still return `Err` if the resource does not exist. pub fn has_read(&self) -> bool { let component_id = self.world.components().resource_id::(); component_id.is_some_and(|component_id| self.access.has_resource_read(component_id)) } /// Gets a reference to the resource of the given type if it exists and the `FilteredResources` has access to it. - pub fn get(&self) -> Option> { - let component_id = self.world.components().resource_id::()?; + pub fn get(&self) -> Result, ResourceFetchError> { + let component_id = self + .world + .components() + .resource_id::() + .ok_or(ResourceFetchError::NotRegistered)?; if !self.access.has_resource_read(component_id) { - return None; + return Err(ResourceFetchError::NoResourceAccess(component_id)); } + // SAFETY: We have read access to this resource - unsafe { self.world.get_resource_with_ticks(component_id) }.map( - |(value, ticks, _caller)| Ref { - // SAFETY: `component_id` was obtained from the type ID of `R`. - value: unsafe { value.deref() }, - // SAFETY: We have read access to the resource, so no mutable reference can exist. - ticks: unsafe { Ticks::from_tick_cells(ticks, self.last_run, self.this_run) }, - #[cfg(feature = "track_location")] - // SAFETY: We have read access to the resource, so no mutable reference can exist. 
- changed_by: unsafe { _caller.deref() }, - }, - ) + let (value, ticks, caller) = unsafe { self.world.get_resource_with_ticks(component_id) } + .ok_or(ResourceFetchError::DoesNotExist(component_id))?; + + Ok(Ref { + // SAFETY: `component_id` was obtained from the type ID of `R`. + value: unsafe { value.deref() }, + // SAFETY: We have read access to the resource, so no mutable reference can exist. + ticks: unsafe { Ticks::from_tick_cells(ticks, self.last_run, self.this_run) }, + // SAFETY: We have read access to the resource, so no mutable reference can exist. + changed_by: unsafe { caller.map(|caller| caller.deref()) }, + }) } /// Gets a pointer to the resource with the given [`ComponentId`] if it exists and the `FilteredResources` has access to it. - pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + pub fn get_by_id(&self, component_id: ComponentId) -> Result, ResourceFetchError> { if !self.access.has_resource_read(component_id) { - return None; + return Err(ResourceFetchError::NoResourceAccess(component_id)); } // SAFETY: We have read access to this resource unsafe { self.world.get_resource_by_id(component_id) } + .ok_or(ResourceFetchError::DoesNotExist(component_id)) } } @@ -282,14 +288,14 @@ impl<'w> From<&'w mut World> for FilteredResources<'w, 'static> { /// /// fn resource_system(mut res: FilteredResourcesMut) { /// // The resource exists, but we have no access, so we can't read it or write it. -/// assert!(res.get::().is_none()); -/// assert!(res.get_mut::().is_none()); +/// assert!(res.get::().is_err()); +/// assert!(res.get_mut::().is_err()); /// // The resource doesn't exist, so we can't read it or write it. -/// assert!(res.get::().is_none()); -/// assert!(res.get_mut::().is_none()); +/// assert!(res.get::().is_err()); +/// assert!(res.get_mut::().is_err()); /// // The resource exists and we have read access, so we can read it but not write it. /// let c = res.get::().unwrap(); -/// assert!(res.get_mut::().is_none()); +/// assert!(res.get_mut::().is_err()); /// // The resource exists and we have write access, so we can read it or write it. /// let d = res.get::().unwrap(); /// let d = res.get_mut::().unwrap(); @@ -408,49 +414,55 @@ impl<'w, 's> FilteredResourcesMut<'w, 's> { } /// Returns `true` if the `FilteredResources` has read access to the given resource. - /// Note that [`Self::get()`] may still return `None` if the resource does not exist. + /// Note that [`Self::get()`] may still return `Err` if the resource does not exist. pub fn has_read(&self) -> bool { let component_id = self.world.components().resource_id::(); component_id.is_some_and(|component_id| self.access.has_resource_read(component_id)) } /// Returns `true` if the `FilteredResources` has write access to the given resource. - /// Note that [`Self::get_mut()`] may still return `None` if the resource does not exist. + /// Note that [`Self::get_mut()`] may still return `Err` if the resource does not exist. pub fn has_write(&self) -> bool { let component_id = self.world.components().resource_id::(); component_id.is_some_and(|component_id| self.access.has_resource_write(component_id)) } /// Gets a reference to the resource of the given type if it exists and the `FilteredResources` has access to it. - pub fn get(&self) -> Option> { + pub fn get(&self) -> Result, ResourceFetchError> { self.as_readonly().get() } /// Gets a pointer to the resource with the given [`ComponentId`] if it exists and the `FilteredResources` has access to it. 
- pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + pub fn get_by_id(&self, component_id: ComponentId) -> Result, ResourceFetchError> { self.as_readonly().get_by_id(component_id) } /// Gets a mutable reference to the resource of the given type if it exists and the `FilteredResources` has access to it. - pub fn get_mut(&mut self) -> Option> { + pub fn get_mut(&mut self) -> Result, ResourceFetchError> { // SAFETY: We have exclusive access to the resources in `access` for `'_`, and we shorten the returned lifetime to that. unsafe { self.get_mut_unchecked() } } /// Gets a mutable pointer to the resource with the given [`ComponentId`] if it exists and the `FilteredResources` has access to it. - pub fn get_mut_by_id(&mut self, component_id: ComponentId) -> Option> { + pub fn get_mut_by_id( + &mut self, + component_id: ComponentId, + ) -> Result, ResourceFetchError> { // SAFETY: We have exclusive access to the resources in `access` for `'_`, and we shorten the returned lifetime to that. unsafe { self.get_mut_by_id_unchecked(component_id) } } /// Consumes self and gets mutable access to resource of the given type with the world `'w` lifetime if it exists and the `FilteredResources` has access to it. - pub fn into_mut(mut self) -> Option> { + pub fn into_mut(mut self) -> Result, ResourceFetchError> { // SAFETY: This consumes self, so we have exclusive access to the resources in `access` for the entirety of `'w`. unsafe { self.get_mut_unchecked() } } /// Consumes self and gets mutable access to resource with the given [`ComponentId`] with the world `'w` lifetime if it exists and the `FilteredResources` has access to it. - pub fn into_mut_by_id(mut self, component_id: ComponentId) -> Option> { + pub fn into_mut_by_id( + mut self, + component_id: ComponentId, + ) -> Result, ResourceFetchError> { // SAFETY: This consumes self, so we have exclusive access to the resources in `access` for the entirety of `'w`. unsafe { self.get_mut_by_id_unchecked(component_id) } } @@ -458,8 +470,12 @@ impl<'w, 's> FilteredResourcesMut<'w, 's> { /// Gets a mutable pointer to the resource of the given type if it exists and the `FilteredResources` has access to it. /// # Safety /// It is the callers responsibility to ensure that there are no conflicting borrows of anything in `access` for the duration of the returned value. - unsafe fn get_mut_unchecked(&mut self) -> Option> { - let component_id = self.world.components().resource_id::()?; + unsafe fn get_mut_unchecked(&mut self) -> Result, ResourceFetchError> { + let component_id = self + .world + .components() + .resource_id::() + .ok_or(ResourceFetchError::NotRegistered)?; // SAFETY: THe caller ensures that there are no conflicting borrows. unsafe { self.get_mut_by_id_unchecked(component_id) } // SAFETY: The underlying type of the resource is `R`. @@ -472,22 +488,23 @@ impl<'w, 's> FilteredResourcesMut<'w, 's> { unsafe fn get_mut_by_id_unchecked( &mut self, component_id: ComponentId, - ) -> Option> { + ) -> Result, ResourceFetchError> { if !self.access.has_resource_write(component_id) { - return None; + return Err(ResourceFetchError::NoResourceAccess(component_id)); } - // SAFETY: We have access to this resource in `access`, and the caller ensures that there are no conflicting borrows for the duration of the returned value. - unsafe { self.world.get_resource_with_ticks(component_id) }.map( - |(value, ticks, _caller)| MutUntyped { - // SAFETY: We have exclusive access to the underlying storage. 
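The mutable accessors follow the same pattern. A short sketch of `FilteredResourcesMut::get_mut` with the new `Result` return, assuming the `From<&mut World>` conversion grants full access (`Counter` is illustrative):

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::world::FilteredResourcesMut;

#[derive(Resource)]
struct Counter(u32);

fn bump(mut res: FilteredResourcesMut) {
    // `get_mut` now reports *why* the write failed instead of returning `None`.
    match res.get_mut::<Counter>() {
        Ok(mut counter) => counter.0 += 1,
        Err(err) => eprintln!("could not write Counter: {err}"),
    }
}

fn main() {
    let mut world = World::new();
    world.insert_resource(Counter(0));
    // Converting from `&mut World` grants write access to all resources.
    bump(FilteredResourcesMut::from(&mut world));
    assert_eq!(world.resource::<Counter>().0, 1);
}
```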
- value: unsafe { value.assert_unique() }, - // SAFETY: We have exclusive access to the underlying storage. - ticks: unsafe { TicksMut::from_tick_cells(ticks, self.last_run, self.this_run) }, - #[cfg(feature = "track_location")] - // SAFETY: We have exclusive access to the underlying storage. - changed_by: unsafe { _caller.deref_mut() }, - }, - ) + + // SAFETY: We have read access to this resource + let (value, ticks, caller) = unsafe { self.world.get_resource_with_ticks(component_id) } + .ok_or(ResourceFetchError::DoesNotExist(component_id))?; + + Ok(MutUntyped { + // SAFETY: We have exclusive access to the underlying storage. + value: unsafe { value.assert_unique() }, + // SAFETY: We have exclusive access to the underlying storage. + ticks: unsafe { TicksMut::from_tick_cells(ticks, self.last_run, self.this_run) }, + // SAFETY: We have exclusive access to the underlying storage. + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, + }) } } @@ -546,7 +563,7 @@ impl<'w> FilteredResourcesBuilder<'w> { /// Add accesses required to read the resource of the given type. pub fn add_read(&mut self) -> &mut Self { - let component_id = self.world.components.register_resource::(); + let component_id = self.world.components_registrator().register_resource::(); self.add_read_by_id(component_id) } @@ -592,7 +609,7 @@ impl<'w> FilteredResourcesMutBuilder<'w> { /// Add accesses required to read the resource of the given type. pub fn add_read(&mut self) -> &mut Self { - let component_id = self.world.components.register_resource::(); + let component_id = self.world.components_registrator().register_resource::(); self.add_read_by_id(component_id) } @@ -610,7 +627,7 @@ impl<'w> FilteredResourcesMutBuilder<'w> { /// Add accesses required to get mutable access to the resource of the given type. pub fn add_write(&mut self) -> &mut Self { - let component_id = self.world.components.register_resource::(); + let component_id = self.world.components_registrator().register_resource::(); self.add_write_by_id(component_id) } diff --git a/crates/bevy_ecs/src/world/identifier.rs b/crates/bevy_ecs/src/world/identifier.rs index 221ddd8210..6b1c803e75 100644 --- a/crates/bevy_ecs/src/world/identifier.rs +++ b/crates/bevy_ecs/src/world/identifier.rs @@ -4,7 +4,7 @@ use crate::{ system::{ExclusiveSystemParam, ReadOnlySystemParam, SystemMeta, SystemParam}, world::{FromWorld, World}, }; -use bevy_platform_support::sync::atomic::{AtomicUsize, Ordering}; +use bevy_platform::sync::atomic::{AtomicUsize, Ordering}; use super::unsafe_world_cell::UnsafeWorldCell; diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index 71f581d6e0..9bd8d699c6 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -21,7 +21,7 @@ pub use crate::{ pub use bevy_ecs_macros::FromWorld; pub use component_constants::*; pub use deferred_world::DeferredWorld; -pub use entity_fetch::WorldEntityFetch; +pub use entity_fetch::{EntityFetcher, WorldEntityFetch}; pub use entity_ref::{ DynamicComponentFetch, EntityMut, EntityMutExcept, EntityRef, EntityRefExcept, EntityWorldMut, Entry, FilteredEntityMut, FilteredEntityRef, OccupiedEntry, TryFromFilteredError, VacantEntry, @@ -30,46 +30,49 @@ pub use filtered_resource::*; pub use identifier::WorldId; pub use spawn_batch::*; +#[expect( + deprecated, + reason = "We need to support `AllocAtWithoutReplacement` for now." 
+)] use crate::{ archetype::{ArchetypeId, ArchetypeRow, Archetypes}, bundle::{ Bundle, BundleEffect, BundleInfo, BundleInserter, BundleSpawner, Bundles, InsertMode, NoBundleEffect, }, - change_detection::{MutUntyped, TicksMut}, + change_detection::{MaybeLocation, MutUntyped, TicksMut}, component::{ - Component, ComponentDescriptor, ComponentHooks, ComponentId, ComponentInfo, ComponentTicks, - Components, Mutable, RequiredComponents, RequiredComponentsError, Tick, + Component, ComponentDescriptor, ComponentHooks, ComponentId, ComponentIds, ComponentInfo, + ComponentTicks, Components, ComponentsQueuedRegistrator, ComponentsRegistrator, Mutable, + RequiredComponents, RequiredComponentsError, Tick, }, - entity::{AllocAtWithoutReplacement, Entities, Entity, EntityLocation}, - entity_disabling::{DefaultQueryFilters, Disabled}, + entity::{ + AllocAtWithoutReplacement, Entities, Entity, EntityDoesNotExistError, EntityLocation, + }, + entity_disabling::DefaultQueryFilters, event::{Event, EventId, Events, SendBatchIds}, observer::Observers, query::{DebugCheckedUnwrap, QueryData, QueryFilter, QueryState}, + relationship::RelationshipHookMode, removal_detection::RemovedComponentEvents, resource::Resource, - result::Result, schedule::{Schedule, ScheduleLabel, Schedules}, storage::{ResourceData, Storages}, system::Commands, world::{ command_queue::RawCommandQueue, - error::{EntityFetchError, TryDespawnError, TryInsertBatchError, TryRunScheduleError}, + error::{ + EntityDespawnError, EntityMutableFetchError, TryInsertBatchError, TryRunScheduleError, + }, }, }; use alloc::{boxed::Box, vec::Vec}; -use bevy_platform_support::sync::atomic::{AtomicU32, Ordering}; -use bevy_ptr::{OwningPtr, Ptr}; +use bevy_platform::sync::atomic::{AtomicU32, Ordering}; +use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref}; use core::{any::TypeId, fmt}; use log::warn; use unsafe_world_cell::{UnsafeEntityCell, UnsafeWorldCell}; -#[cfg(feature = "track_location")] -use bevy_ptr::UnsafeCellDeref; - -#[cfg(feature = "track_location")] -use core::panic::Location; - /// Stores and exposes operations on [entities](Entity), [components](Component), resources, /// and their associated metadata. /// @@ -92,6 +95,7 @@ pub struct World { id: WorldId, pub(crate) entities: Entities, pub(crate) components: Components, + pub(crate) component_ids: ComponentIds, pub(crate) archetypes: Archetypes, pub(crate) storages: Storages, pub(crate) bundles: Bundles, @@ -122,6 +126,7 @@ impl Default for World { last_check_tick: Tick::new(0), last_trigger_id: 0, command_queue: RawCommandQueue::new(), + component_ids: ComponentIds::default(), }; world.bootstrap(); world @@ -162,10 +167,8 @@ impl World { let on_despawn = OnDespawn::register_component_id(self); assert_eq!(ON_DESPAWN, on_despawn); - let disabled = self.register_component::(); - let mut filters = DefaultQueryFilters::default(); - filters.set_disabled(disabled); - self.insert_resource(filters); + // This sets up `Disabled` as a disabling component, via the FromWorld impl + self.init_resource::(); } /// Creates a new empty [`World`]. /// @@ -225,6 +228,22 @@ impl World { &self.components } + /// Prepares a [`ComponentsQueuedRegistrator`] for the world. + /// **NOTE:** [`ComponentsQueuedRegistrator`] is easily misused. + /// See its docs for important notes on when and how it should be used. + #[inline] + pub fn components_queue(&self) -> ComponentsQueuedRegistrator { + // SAFETY: These are from the same world. 
+ unsafe { ComponentsQueuedRegistrator::new(&self.components, &self.component_ids) } + } + + /// Prepares a [`ComponentsRegistrator`] for the world. + #[inline] + pub fn components_registrator(&mut self) -> ComponentsRegistrator { + // SAFETY: These are from the same world. + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) } + } + /// Retrieves this world's [`Storages`] collection. #[inline] pub fn storages(&self) -> &Storages { @@ -252,8 +271,20 @@ impl World { } /// Registers a new [`Component`] type and returns the [`ComponentId`] created for it. + /// + /// # Usage Notes + /// In most cases, you don't need to call this method directly since component registration + /// happens automatically during system initialization. pub fn register_component(&mut self) -> ComponentId { - self.components.register_component::() + self.components_registrator().register_component::() + } + + /// Registers a component type as "disabling", + /// using [default query filters](DefaultQueryFilters) to exclude entities with the component from queries. + pub fn register_disabling_component(&mut self) { + let component_id = self.register_component::(); + let mut dqf = self.resource_mut::(); + dqf.register_disabling_component(component_id); } /// Returns a mutable reference to the [`ComponentHooks`] for a [`Component`] type. @@ -529,7 +560,7 @@ impl World { &mut self, descriptor: ComponentDescriptor, ) -> ComponentId { - self.components + self.components_registrator() .register_component_with_descriptor(descriptor) } @@ -569,7 +600,7 @@ impl World { /// to insert the [`Resource`] in the [`World`], use [`World::init_resource`] or /// [`World::insert_resource`] instead. pub fn register_resource(&mut self) -> ComponentId { - self.components.register_resource::() + self.components_registrator().register_resource::() } /// Returns the [`ComponentId`] of the given [`Resource`] type `T`. @@ -658,10 +689,10 @@ impl World { /// } /// ``` /// - /// ## [`EntityHashSet`](crate::entity::hash_map::EntityHashMap) + /// ## [`EntityHashSet`](crate::entity::EntityHashMap) /// /// ``` - /// # use bevy_ecs::{prelude::*, entity::hash_set::EntityHashSet}; + /// # use bevy_ecs::{prelude::*, entity::EntityHashSet}; /// #[derive(Component)] /// struct Position { /// x: f32, @@ -679,7 +710,7 @@ impl World { /// } /// ``` /// - /// [`EntityHashSet`]: crate::entity::hash_set::EntityHashSet + /// [`EntityHashSet`]: crate::entity::EntityHashSet #[inline] #[track_caller] pub fn entity(&self, entities: F) -> F::Ref<'_> { @@ -695,7 +726,7 @@ impl World { match self.get_entity(entities) { Ok(fetched) => fetched, - Err(entity) => panic_no_entity(self, entity), + Err(error) => panic_no_entity(self, error.entity), } } @@ -710,8 +741,8 @@ impl World { /// such as adding or removing components, or despawning the entity. /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. /// - Pass an array of [`Entity`]s to receive an equally-sized array of [`EntityMut`]s. - /// - Pass a reference to a [`EntityHashSet`](crate::entity::hash_map::EntityHashMap) to receive an - /// [`EntityHashMap`](crate::entity::hash_map::EntityHashMap). + /// - Pass a reference to a [`EntityHashSet`](crate::entity::EntityHashMap) to receive an + /// [`EntityHashMap`](crate::entity::EntityHashMap). 
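A brief sketch of the new `register_disabling_component` helper: once a component is registered as disabling, the world's `DefaultQueryFilters` should exclude entities carrying it from ordinary queries (`Hidden` and `Label` are illustrative names):

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Hidden;

#[derive(Component)]
struct Label(&'static str);

fn main() {
    let mut world = World::new();

    // After this call, the world's `DefaultQueryFilters` treat `Hidden`
    // like `Disabled`: queries that don't mention it skip such entities.
    world.register_disabling_component::<Hidden>();

    world.spawn(Label("visible"));
    world.spawn((Label("hidden"), Hidden));

    let mut query = world.query::<&Label>();
    let labels: Vec<&'static str> = query.iter(&world).map(|label| label.0).collect();
    assert_eq!(labels, vec!["visible"]);
}
```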
/// /// In order to perform structural changes on the returned entity reference, /// such as adding or removing components, or despawning the entity, only a @@ -792,10 +823,10 @@ impl World { /// } /// ``` /// - /// ## [`EntityHashSet`](crate::entity::hash_map::EntityHashMap) + /// ## [`EntityHashSet`](crate::entity::EntityHashMap) /// /// ``` - /// # use bevy_ecs::{prelude::*, entity::hash_set::EntityHashSet}; + /// # use bevy_ecs::{prelude::*, entity::EntityHashSet}; /// #[derive(Component)] /// struct Position { /// x: f32, @@ -815,14 +846,14 @@ impl World { /// } /// ``` /// - /// [`EntityHashSet`]: crate::entity::hash_set::EntityHashSet + /// [`EntityHashSet`]: crate::entity::EntityHashSet #[inline] #[track_caller] pub fn entity_mut(&mut self, entities: F) -> F::Mut<'_> { #[inline(never)] #[cold] #[track_caller] - fn panic_on_err(e: EntityFetchError) -> ! { + fn panic_on_err(e: EntityMutableFetchError) -> ! { panic!("{e}"); } @@ -834,25 +865,23 @@ impl World { /// Returns the components of an [`Entity`] through [`ComponentInfo`]. #[inline] - pub fn inspect_entity(&self, entity: Entity) -> impl Iterator { + pub fn inspect_entity( + &self, + entity: Entity, + ) -> Result, EntityDoesNotExistError> { let entity_location = self .entities() .get(entity) - .unwrap_or_else(|| panic!("Entity {entity} does not exist")); + .ok_or(EntityDoesNotExistError::new(entity, self.entities()))?; let archetype = self .archetypes() .get(entity_location.archetype_id) - .unwrap_or_else(|| { - panic!( - "Archetype {:?} does not exist", - entity_location.archetype_id - ) - }); + .expect("ArchetypeId was retrieved from an EntityLocation and should correspond to an Archetype"); - archetype + Ok(archetype .components() - .filter_map(|id| self.components().get_info(id)) + .filter_map(|id| self.components().get_info(id))) } /// Returns [`EntityRef`]s that expose read-only operations for the given @@ -864,21 +893,24 @@ impl World { /// - Pass an [`Entity`] to receive a single [`EntityRef`]. /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. /// - Pass an array of [`Entity`]s to receive an equally-sized array of [`EntityRef`]s. - /// - Pass a reference to a [`EntityHashSet`](crate::entity::hash_map::EntityHashMap) to receive an - /// [`EntityHashMap`](crate::entity::hash_map::EntityHashMap). + /// - Pass a reference to a [`EntityHashSet`](crate::entity::EntityHashMap) to receive an + /// [`EntityHashMap`](crate::entity::EntityHashMap). /// /// # Errors /// /// If any of the given `entities` do not exist in the world, the first - /// [`Entity`] found to be missing will be returned in the [`Err`]. + /// [`Entity`] found to be missing will return an [`EntityDoesNotExistError`]. /// /// # Examples /// /// For examples, see [`World::entity`]. /// - /// [`EntityHashSet`]: crate::entity::hash_set::EntityHashSet + /// [`EntityHashSet`]: crate::entity::EntityHashSet #[inline] - pub fn get_entity(&self, entities: F) -> Result, Entity> { + pub fn get_entity( + &self, + entities: F, + ) -> Result, EntityDoesNotExistError> { let cell = self.as_unsafe_world_cell_readonly(); // SAFETY: `&self` gives read access to the entire world, and prevents mutable access. unsafe { entities.fetch_ref(cell) } @@ -895,8 +927,8 @@ impl World { /// such as adding or removing components, or despawning the entity. /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. /// - Pass an array of [`Entity`]s to receive an equally-sized array of [`EntityMut`]s. 
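Since `inspect_entity` now returns a `Result` rather than panicking, callers can probe despawned entities safely. A minimal sketch:

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct Marker;

fn main() {
    let mut world = World::new();
    let entity = world.spawn(Marker).id();

    // The iterator over `ComponentInfo` is now behind a `Result`, so a
    // missing entity is an error value instead of a panic.
    let component_count = world
        .inspect_entity(entity)
        .expect("entity exists")
        .count();
    assert_eq!(component_count, 1);

    world.despawn(entity);
    assert!(world.inspect_entity(entity).is_err());
}
```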
- /// - Pass a reference to a [`EntityHashSet`](crate::entity::hash_map::EntityHashMap) to receive an - /// [`EntityHashMap`](crate::entity::hash_map::EntityHashMap). + /// - Pass a reference to a [`EntityHashSet`](crate::entity::EntityHashMap) to receive an + /// [`EntityHashMap`](crate::entity::EntityHashMap). /// /// In order to perform structural changes on the returned entity reference, /// such as adding or removing components, or despawning the entity, only a @@ -906,20 +938,20 @@ impl World { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if any of the given `entities` do not exist in the world. + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if any of the given `entities` do not exist in the world. /// - Only the first entity found to be missing will be returned. - /// - Returns [`EntityFetchError::AliasedMutability`] if the same entity is requested multiple times. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the same entity is requested multiple times. /// /// # Examples /// /// For examples, see [`World::entity_mut`]. /// - /// [`EntityHashSet`]: crate::entity::hash_set::EntityHashSet + /// [`EntityHashSet`]: crate::entity::EntityHashSet #[inline] pub fn get_entity_mut( &mut self, entities: F, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let cell = self.as_unsafe_world_cell(); // SAFETY: `&mut self` gives mutable access to the entire world, // and prevents any other access to the world. @@ -983,6 +1015,52 @@ impl World { }) } + /// Simultaneously provides access to entity data and a command queue, which + /// will be applied when the world is next flushed. + /// + /// This allows using borrowed entity data to construct commands where the + /// borrow checker would otherwise prevent it. + /// + /// See [`DeferredWorld::entities_and_commands`] for the deferred version. + /// + /// # Example + /// + /// ```rust + /// # use bevy_ecs::{prelude::*, world::DeferredWorld}; + /// #[derive(Component)] + /// struct Targets(Vec); + /// #[derive(Component)] + /// struct TargetedBy(Entity); + /// + /// let mut world: World = // ... + /// # World::new(); + /// # let e1 = world.spawn_empty().id(); + /// # let e2 = world.spawn_empty().id(); + /// # let eid = world.spawn(Targets(vec![e1, e2])).id(); + /// let (entities, mut commands) = world.entities_and_commands(); + /// + /// let entity = entities.get(eid).unwrap(); + /// for &target in entity.get::().unwrap().0.iter() { + /// commands.entity(target).insert(TargetedBy(eid)); + /// } + /// # world.flush(); + /// # assert_eq!(world.get::(e1).unwrap().0, eid); + /// # assert_eq!(world.get::(e2).unwrap().0, eid); + /// ``` + pub fn entities_and_commands(&mut self) -> (EntityFetcher, Commands) { + let cell = self.as_unsafe_world_cell(); + // SAFETY: `&mut self` gives mutable access to the entire world, and prevents simultaneous access. + let fetcher = unsafe { EntityFetcher::new(cell) }; + // SAFETY: + // - `&mut self` gives mutable access to the entire world, and prevents simultaneous access. + // - Command queue access does not conflict with entity access. + let raw_queue = unsafe { cell.get_raw_command_queue() }; + // SAFETY: `&mut self` ensures the commands does not outlive the world. + let commands = unsafe { Commands::new_raw_from_entities(raw_queue, cell.entities()) }; + + (fetcher, commands) + } + /// Spawns a new [`Entity`] and returns a corresponding [`EntityWorldMut`], which can be used /// to add components to the entity or retrieve its id. 
/// @@ -1013,13 +1091,7 @@ impl World { self.flush(); let entity = self.entities.alloc(); // SAFETY: entity was just allocated - unsafe { - self.spawn_at_empty_internal( - entity, - #[cfg(feature = "track_location")] - Location::caller(), - ) - } + unsafe { self.spawn_at_empty_internal(entity, MaybeLocation::caller()) } } /// Spawns a new [`Entity`] with a given [`Bundle`] of [components](`Component`) and returns @@ -1084,42 +1156,31 @@ impl World { /// ``` #[track_caller] pub fn spawn(&mut self, bundle: B) -> EntityWorldMut { - self.spawn_with_caller( - bundle, - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.spawn_with_caller(bundle, MaybeLocation::caller()) } pub(crate) fn spawn_with_caller( &mut self, bundle: B, - #[cfg(feature = "track_location")] caller: &'static Location<'static>, + caller: MaybeLocation, ) -> EntityWorldMut { self.flush(); let change_tick = self.change_tick(); let entity = self.entities.alloc(); let mut bundle_spawner = BundleSpawner::new::(self, change_tick); // SAFETY: bundle's type matches `bundle_info`, entity is allocated but non-existent - let (mut entity_location, after_effect) = unsafe { - bundle_spawner.spawn_non_existent( - entity, - bundle, - #[cfg(feature = "track_location")] - caller, - ) - }; + let (mut entity_location, after_effect) = + unsafe { bundle_spawner.spawn_non_existent(entity, bundle, caller) }; // SAFETY: command_queue is not referenced anywhere else if !unsafe { self.command_queue.is_empty() } { - self.flush_commands(); + self.flush(); entity_location = self .entities() .get(entity) .unwrap_or(EntityLocation::INVALID); } - #[cfg(feature = "track_location")] self.entities .set_spawned_or_despawned_by(entity.index(), caller); @@ -1134,7 +1195,7 @@ impl World { unsafe fn spawn_at_empty_internal( &mut self, entity: Entity, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) -> EntityWorldMut { let archetype = self.archetypes.empty_mut(); // PERF: consider avoiding allocating entities in the empty archetype unless needed @@ -1144,7 +1205,6 @@ impl World { let location = unsafe { archetype.allocate(entity, table_row) }; self.entities.set(entity.index(), location); - #[cfg(feature = "track_location")] self.entities .set_spawned_or_despawned_by(entity.index(), caller); @@ -1179,12 +1239,7 @@ impl World { I: IntoIterator, I::Item: Bundle, { - SpawnBatchIter::new( - self, - iter.into_iter(), - #[cfg(feature = "track_location")] - Location::caller(), - ) + SpawnBatchIter::new(self, iter.into_iter(), MaybeLocation::caller()) } /// Retrieves a reference to the given `entity`'s [`Component`] of the given type. @@ -1268,21 +1323,39 @@ impl World { &mut self, entity: Entity, f: impl FnOnce(&mut T) -> R, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let mut world = DeferredWorld::from(&mut *self); - let result = match world.modify_component(entity, f) { - Ok(result) => result, - Err(EntityFetchError::AliasedMutability(..)) => { - return Err(EntityFetchError::AliasedMutability(entity)) - } - Err(EntityFetchError::NoSuchEntity(..)) => { - return Err(EntityFetchError::NoSuchEntity( - entity, - self.entities().entity_does_not_exist_error_details(entity), - )) - } - }; + let result = world.modify_component(entity, f)?; + + self.flush(); + Ok(result) + } + + /// Temporarily removes a [`Component`] identified by the provided + /// [`ComponentId`] from the provided [`Entity`] and runs the provided + /// closure on it, returning the result if the component was available. 
+ /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut_by_id`](World::get_mut_by_id). + /// + /// You should prefer the typed [`modify_component`](World::modify_component) + /// whenever possible. + #[inline] + pub fn modify_component_by_id( + &mut self, + entity: Entity, + component_id: ComponentId, + f: impl for<'a> FnOnce(MutUntyped<'a>) -> R, + ) -> Result, EntityMutableFetchError> { + let mut world = DeferredWorld::from(&mut *self); + + let result = world.modify_component_by_id(entity, component_id, f)?; self.flush(); Ok(result) @@ -1317,11 +1390,7 @@ impl World { #[track_caller] #[inline] pub fn despawn(&mut self, entity: Entity) -> bool { - if let Err(error) = self.despawn_with_caller( - entity, - #[cfg(feature = "track_location")] - Location::caller(), - ) { + if let Err(error) = self.despawn_with_caller(entity, MaybeLocation::caller()) { warn!("{error}"); false } else { @@ -1332,7 +1401,7 @@ impl World { /// Despawns the given `entity`, if it exists. This will also remove all of the entity's /// [`Components`](Component). /// - /// Returns a [`TryDespawnError`] if the entity does not exist. + /// Returns an [`EntityDespawnError`] if the entity does not exist. /// /// # Note /// @@ -1340,33 +1409,20 @@ impl World { /// to despawn descendants. For example, this will recursively despawn [`Children`](crate::hierarchy::Children). #[track_caller] #[inline] - pub fn try_despawn(&mut self, entity: Entity) -> Result<(), TryDespawnError> { - self.despawn_with_caller( - entity, - #[cfg(feature = "track_location")] - Location::caller(), - ) + pub fn try_despawn(&mut self, entity: Entity) -> Result<(), EntityDespawnError> { + self.despawn_with_caller(entity, MaybeLocation::caller()) } #[inline] pub(crate) fn despawn_with_caller( &mut self, entity: Entity, - #[cfg(feature = "track_location")] caller: &'static Location, - ) -> Result<(), TryDespawnError> { + caller: MaybeLocation, + ) -> Result<(), EntityDespawnError> { self.flush(); - if let Ok(entity) = self.get_entity_mut(entity) { - entity.despawn_with_caller( - #[cfg(feature = "track_location")] - caller, - ); - Ok(()) - } else { - Err(TryDespawnError { - entity, - details: self.entities().entity_does_not_exist_error_details(entity), - }) - } + let entity = self.get_entity_mut(entity)?; + entity.despawn_with_caller(caller); + Ok(()) } /// Clears the internal component tracker state. @@ -1616,7 +1672,7 @@ impl World { &mut self, descriptor: ComponentDescriptor, ) -> ComponentId { - self.components + self.components_registrator() .register_resource_with_descriptor(descriptor) } @@ -1630,9 +1686,8 @@ impl World { #[inline] #[track_caller] pub fn init_resource(&mut self) -> ComponentId { - #[cfg(feature = "track_location")] - let caller = Location::caller(); - let component_id = self.components.register_resource::(); + let caller = MaybeLocation::caller(); + let component_id = self.components_registrator().register_resource::(); if self .storages .resources @@ -1643,12 +1698,7 @@ impl World { OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. 
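As a rough illustration of the typed `modify_component` mentioned above, using an immutable component where replace-on-write is the only way to change the value (the `PlayerName` component and the `#[component(immutable)]` usage are illustrative, not part of this diff):

```rust
use bevy_ecs::prelude::*;

// An immutable component can only change by being replaced, which is roughly
// what `modify_component` does internally, firing the relevant hooks without
// an archetype move.
#[derive(Component, Debug, PartialEq)]
#[component(immutable)]
struct PlayerName(String);

fn main() {
    let mut world = World::new();
    let entity = world.spawn(PlayerName("old".into())).id();

    let result = world
        .modify_component(entity, |name: &mut PlayerName| {
            name.0 = "new".into();
        })
        .expect("entity exists");
    // `Some` because the entity did have a `PlayerName` to modify.
    assert_eq!(result, Some(()));

    assert_eq!(world.get::<PlayerName>(entity), Some(&PlayerName("new".into())));
}
```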
unsafe { - self.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_location")] - caller, - ); + self.insert_resource_by_id(component_id, ptr, caller); } }); } @@ -1663,11 +1713,7 @@ impl World { #[inline] #[track_caller] pub fn insert_resource(&mut self, value: R) { - self.insert_resource_with_caller( - value, - #[cfg(feature = "track_location")] - Location::caller(), - ); + self.insert_resource_with_caller(value, MaybeLocation::caller()); } /// Split into a new function so we can pass the calling location into the function when using @@ -1676,18 +1722,13 @@ impl World { pub(crate) fn insert_resource_with_caller( &mut self, value: R, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) { - let component_id = self.components.register_resource::(); + let component_id = self.components_registrator().register_resource::(); OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. unsafe { - self.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_location")] - caller, - ); + self.insert_resource_by_id(component_id, ptr, caller); } }); } @@ -1706,9 +1747,8 @@ impl World { #[inline] #[track_caller] pub fn init_non_send_resource(&mut self) -> ComponentId { - #[cfg(feature = "track_location")] - let caller = Location::caller(); - let component_id = self.components.register_non_send::(); + let caller = MaybeLocation::caller(); + let component_id = self.components_registrator().register_non_send::(); if self .storages .non_send_resources @@ -1719,12 +1759,7 @@ impl World { OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. unsafe { - self.insert_non_send_by_id( - component_id, - ptr, - #[cfg(feature = "track_location")] - caller, - ); + self.insert_non_send_by_id(component_id, ptr, caller); } }); } @@ -1743,18 +1778,12 @@ impl World { #[inline] #[track_caller] pub fn insert_non_send_resource(&mut self, value: R) { - #[cfg(feature = "track_location")] - let caller = Location::caller(); - let component_id = self.components.register_non_send::(); + let caller = MaybeLocation::caller(); + let component_id = self.components_registrator().register_non_send::(); OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. unsafe { - self.insert_non_send_by_id( - component_id, - ptr, - #[cfg(feature = "track_location")] - caller, - ); + self.insert_non_send_by_id(component_id, ptr, caller); } }); } @@ -2029,23 +2058,17 @@ impl World { &mut self, func: impl FnOnce() -> R, ) -> Mut<'_, R> { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); let change_tick = self.change_tick(); let last_change_tick = self.last_change_tick(); - let component_id = self.components.register_resource::(); + let component_id = self.components_registrator().register_resource::(); let data = self.initialize_resource_internal(component_id); if !data.is_present() { OwningPtr::make(func(), |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. 
unsafe { - data.insert( - ptr, - change_tick, - #[cfg(feature = "track_location")] - caller, - ); + data.insert(ptr, change_tick, caller); } }); } @@ -2093,12 +2116,11 @@ impl World { /// ``` #[track_caller] pub fn get_resource_or_init(&mut self) -> Mut<'_, R> { - #[cfg(feature = "track_location")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); let change_tick = self.change_tick(); let last_change_tick = self.last_change_tick(); - let component_id = self.components.register_resource::(); + let component_id = self.components_registrator().register_resource::(); if self .storages .resources @@ -2109,12 +2131,7 @@ impl World { OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. unsafe { - self.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_location")] - caller, - ); + self.insert_resource_by_id(component_id, ptr, caller); } }); } @@ -2235,26 +2252,34 @@ impl World { /// assert_eq!(world.get::(e0), Some(&B(0.0))); /// ``` #[track_caller] + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used with lots of arbitrary free entities. See #18054 on GitHub." + )] pub fn insert_or_spawn_batch(&mut self, iter: I) -> Result<(), Vec> where I: IntoIterator, I::IntoIter: Iterator, B: Bundle, { - self.insert_or_spawn_batch_with_caller( - iter, - #[cfg(feature = "track_location")] - Location::caller(), - ) + #[expect( + deprecated, + reason = "This needs to be supported for now, and the outer function is deprecated too." + )] + self.insert_or_spawn_batch_with_caller(iter, MaybeLocation::caller()) } /// Split into a new function so we can pass the calling location into the function when using /// as a command. #[inline] + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used with lots of arbitrary free entities. See #18054 on GitHub." + )] pub(crate) fn insert_or_spawn_batch_with_caller( &mut self, iter: I, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) -> Result<(), Vec> where I: IntoIterator, @@ -2262,12 +2287,14 @@ impl World { B: Bundle, { self.flush(); - let change_tick = self.change_tick(); + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; let bundle_id = self .bundles - .register_info::(&mut self.components, &mut self.storages); + .register_info::(&mut registrator, &mut self.storages); enum SpawnOrInsert<'w> { Spawn(BundleSpawner<'w>), Insert(BundleInserter<'w>, ArchetypeId), @@ -2288,6 +2315,10 @@ impl World { let mut invalid_entities = Vec::new(); for (entity, bundle) in iter { + #[expect( + deprecated, + reason = "This needs to be supported for now, and the outer function is deprecated too." 
+ )] match spawn_or_insert .entities() .alloc_at_without_replacement(entity) @@ -2304,8 +2335,8 @@ impl World { location, bundle, InsertMode::Replace, - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ) }; } @@ -2326,8 +2357,8 @@ impl World { location, bundle, InsertMode::Replace, - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ) }; spawn_or_insert = @@ -2338,27 +2369,13 @@ impl World { AllocAtWithoutReplacement::DidNotExist => { if let SpawnOrInsert::Spawn(ref mut spawner) = spawn_or_insert { // SAFETY: `entity` is allocated (but non existent), bundle matches inserter - unsafe { - spawner.spawn_non_existent( - entity, - bundle, - #[cfg(feature = "track_location")] - caller, - ) - }; + unsafe { spawner.spawn_non_existent(entity, bundle, caller) }; } else { // SAFETY: we initialized this bundle_id in `init_info` let mut spawner = unsafe { BundleSpawner::new_with_id(self, bundle_id, change_tick) }; // SAFETY: `entity` is valid, `location` matches entity, bundle matches inserter - unsafe { - spawner.spawn_non_existent( - entity, - bundle, - #[cfg(feature = "track_location")] - caller, - ) - }; + unsafe { spawner.spawn_non_existent(entity, bundle, caller) }; spawn_or_insert = SpawnOrInsert::Spawn(spawner); } } @@ -2397,12 +2414,7 @@ impl World { I::IntoIter: Iterator, B: Bundle, { - self.insert_batch_with_caller( - batch, - InsertMode::Replace, - #[cfg(feature = "track_location")] - Location::caller(), - ); + self.insert_batch_with_caller(batch, InsertMode::Replace, MaybeLocation::caller()); } /// For a given batch of ([`Entity`], [`Bundle`]) pairs, @@ -2427,12 +2439,7 @@ impl World { I::IntoIter: Iterator, B: Bundle, { - self.insert_batch_with_caller( - batch, - InsertMode::Keep, - #[cfg(feature = "track_location")] - Location::caller(), - ); + self.insert_batch_with_caller(batch, InsertMode::Keep, MaybeLocation::caller()); } /// Split into a new function so we can differentiate the calling location. @@ -2445,7 +2452,7 @@ impl World { &mut self, batch: I, insert_mode: InsertMode, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) where I: IntoIterator, I::IntoIter: Iterator, @@ -2458,9 +2465,12 @@ impl World { self.flush(); let change_tick = self.change_tick(); + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; let bundle_id = self .bundles - .register_info::(&mut self.components, &mut self.storages); + .register_info::(&mut registrator, &mut self.storages); let mut batch_iter = batch.into_iter(); @@ -2485,8 +2495,8 @@ impl World { first_location, first_bundle, insert_mode, - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ) }; @@ -2513,8 +2523,8 @@ impl World { location, bundle, insert_mode, - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ) }; } else { @@ -2547,12 +2557,7 @@ impl World { I::IntoIter: Iterator, B: Bundle, { - self.try_insert_batch_with_caller( - batch, - InsertMode::Replace, - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.try_insert_batch_with_caller(batch, InsertMode::Replace, MaybeLocation::caller()) } /// For a given batch of ([`Entity`], [`Bundle`]) pairs, /// adds the `Bundle` of components to each `Entity` without overwriting. 
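A short sketch contrasting `insert_batch` and `try_insert_batch` with the now-deprecated `insert_or_spawn_batch`: both only touch entities that already exist, and the `try_` variant collects the missing ones instead of panicking (`Health` is illustrative):

```rust
use bevy_ecs::prelude::*;

#[derive(Component, Debug, PartialEq)]
struct Health(u32);

fn main() {
    let mut world = World::new();
    let a = world.spawn_empty().id();
    let b = world.spawn_empty().id();

    // `insert_batch` never spawns: every entity in the batch must exist.
    world.insert_batch([(a, Health(10)), (b, Health(20))]);
    assert_eq!(world.get::<Health>(a), Some(&Health(10)));

    // The `try_` variant reports missing entities in the error while still
    // inserting into the entities that do exist.
    let dead = world.spawn_empty().id();
    world.despawn(dead);
    let err = world
        .try_insert_batch([(a, Health(30)), (dead, Health(40))])
        .unwrap_err();
    assert_eq!(err.entities, vec![dead]);
    assert_eq!(world.get::<Health>(a), Some(&Health(30)));
}
```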
@@ -2574,12 +2579,7 @@ impl World { I::IntoIter: Iterator, B: Bundle, { - self.try_insert_batch_with_caller( - batch, - InsertMode::Keep, - #[cfg(feature = "track_location")] - Location::caller(), - ) + self.try_insert_batch_with_caller(batch, InsertMode::Keep, MaybeLocation::caller()) } /// Split into a new function so we can differentiate the calling location. @@ -2596,7 +2596,7 @@ impl World { &mut self, batch: I, insert_mode: InsertMode, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) -> Result<(), TryInsertBatchError> where I: IntoIterator, @@ -2610,9 +2610,12 @@ impl World { self.flush(); let change_tick = self.change_tick(); + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; let bundle_id = self .bundles - .register_info::(&mut self.components, &mut self.storages); + .register_info::(&mut registrator, &mut self.storages); let mut invalid_entities = Vec::::new(); let mut batch_iter = batch.into_iter(); @@ -2642,8 +2645,8 @@ impl World { first_location, first_bundle, insert_mode, - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ) }; break Some(cache); @@ -2679,8 +2682,8 @@ impl World { location, bundle, insert_mode, - #[cfg(feature = "track_location")] caller, + RelationshipHookMode::Run, ) }; } else { @@ -2745,7 +2748,7 @@ impl World { let change_tick = self.change_tick(); let component_id = self.components.get_resource_id(TypeId::of::())?; - let (ptr, mut ticks, mut _caller) = self + let (ptr, mut ticks, mut caller) = self .storages .resources .get_mut(component_id) @@ -2761,8 +2764,7 @@ impl World { last_run: last_change_tick, this_run: change_tick, }, - #[cfg(feature = "track_location")] - changed_by: &mut _caller, + changed_by: caller.as_mut(), }; let result = f(self, value_mut); assert!(!self.contains_resource::(), @@ -2774,12 +2776,7 @@ impl World { // SAFETY: pointer is of type R unsafe { self.storages.resources.get_mut(component_id).map(|info| { - info.insert_with_ticks( - ptr, - ticks, - #[cfg(feature = "track_location")] - _caller, - ); + info.insert_with_ticks(ptr, ticks, caller); }) } })?; @@ -2834,19 +2831,14 @@ impl World { &mut self, component_id: ComponentId, value: OwningPtr<'_>, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) { let change_tick = self.change_tick(); let resource = self.initialize_resource_internal(component_id); // SAFETY: `value` is valid for `component_id`, ensured by caller unsafe { - resource.insert( - value, - change_tick, - #[cfg(feature = "track_location")] - caller, - ); + resource.insert(value, change_tick, caller); } } @@ -2868,19 +2860,14 @@ impl World { &mut self, component_id: ComponentId, value: OwningPtr<'_>, - #[cfg(feature = "track_location")] caller: &'static Location, + caller: MaybeLocation, ) { let change_tick = self.change_tick(); let resource = self.initialize_non_send_internal(component_id); // SAFETY: `value` is valid for `component_id`, ensured by caller unsafe { - resource.insert( - value, - change_tick, - #[cfg(feature = "track_location")] - caller, - ); + resource.insert(value, change_tick, caller); } } @@ -2891,6 +2878,7 @@ impl World { &mut self, component_id: ComponentId, ) -> &mut ResourceData { + self.flush_components(); let archetypes = &mut self.archetypes; self.storages .resources @@ -2906,6 +2894,7 @@ impl World { &mut 
self, component_id: ComponentId, ) -> &mut ResourceData { + self.flush_components(); let archetypes = &mut self.archetypes; self.storages .non_send_resources @@ -2949,12 +2938,22 @@ impl World { } } + /// Applies any queued component registration. + /// For spawning vanilla rust component types and resources, this is not strictly necessary. + /// However, flushing components can make information available more quickly, and can have performance benefits. + /// Additionally, for components and resources registered dynamically through a raw descriptor or similar, + /// this is the only way to complete their registration. + pub(crate) fn flush_components(&mut self) { + self.components_registrator().apply_queued_registrations(); + } + /// Flushes queued entities and commands. /// /// Queued entities will be spawned, and then commands will be applied. #[inline] pub fn flush(&mut self) { self.flush_entities(); + self.flush_components(); self.flush_commands(); } @@ -3188,9 +3187,31 @@ impl World { /// component in the bundle. #[inline] pub fn register_bundle(&mut self) -> &BundleInfo { + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; let id = self .bundles - .register_info::(&mut self.components, &mut self.storages); + .register_info::(&mut registrator, &mut self.storages); + // SAFETY: We just initialized the bundle so its id should definitely be valid. + unsafe { self.bundles.get(id).debug_checked_unwrap() } + } + + /// Registers the given [`ComponentId`]s as a dynamic bundle and returns both the required component ids and the bundle id. + /// + /// Note that the components need to be registered first, this function only creates a bundle combining them. Components + /// can be registered with [`World::register_component`]/[`_with_descriptor`](World::register_component_with_descriptor). + /// + /// **You should prefer to use the typed API [`World::register_bundle`] where possible and only use this in cases where + /// not all of the actual types are known at compile time.** + /// + /// # Panics + /// This function will panic if any of the provided component ids do not belong to a component known to this [`World`]. + #[inline] + pub fn register_dynamic_bundle(&mut self, component_ids: &[ComponentId]) -> &BundleInfo { + let id = + self.bundles + .init_dynamic_info(&mut self.storages, &self.components, component_ids); // SAFETY: We just initialized the bundle so its id should definitely be valid. 
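A minimal sketch of the new `register_dynamic_bundle`, which only groups already-registered component ids (the `id()` accessor on `BundleInfo` is assumed here):

```rust
use bevy_ecs::prelude::*;

#[derive(Component)]
struct A;

#[derive(Component)]
struct B;

fn main() {
    let mut world = World::new();

    // The components must be registered first; the dynamic bundle merely
    // groups their ids for use with the untyped spawn/insert APIs.
    let a = world.register_component::<A>();
    let b = world.register_component::<B>();

    let info = world.register_dynamic_bundle(&[a, b]);
    println!("registered dynamic bundle {:?}", info.id());
}
```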
unsafe { self.bundles.get(id).debug_checked_unwrap() } } @@ -3410,7 +3431,7 @@ impl World { .get_info(component_id) .debug_checked_unwrap() }; - let (ptr, ticks, _caller) = data.get_with_ticks()?; + let (ptr, ticks, caller) = data.get_with_ticks()?; // SAFETY: // - We have exclusive access to the world, so no other code can be aliasing the `TickCells` @@ -3429,11 +3450,10 @@ impl World { // - We iterate one resource at a time, and we let go of each `PtrMut` before getting the next one value: unsafe { ptr.assert_unique() }, ticks, - #[cfg(feature = "track_location")] // SAFETY: // - We have exclusive access to the world, so no other code can be aliasing the `Ptr` // - We iterate one resource at a time, and we let go of each `PtrMut` before getting the next one - changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }; Some((component_info, mut_untyped)) @@ -3741,16 +3761,17 @@ impl FromWorld for T { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use super::{FromWorld, World}; use crate::{ - change_detection::DetectChangesMut, + change_detection::{DetectChangesMut, MaybeLocation}, component::{ComponentCloneBehavior, ComponentDescriptor, ComponentInfo, StorageType}, - entity::hash_set::EntityHashSet, + entity::EntityHashSet, entity_disabling::{DefaultQueryFilters, Disabled}, ptr::OwningPtr, resource::Resource, - world::error::EntityFetchError, + world::{error::EntityMutableFetchError, DeferredWorld}, }; use alloc::{ borrow::ToOwned, @@ -3760,7 +3781,7 @@ mod tests { vec::Vec, }; use bevy_ecs_macros::Component; - use bevy_platform_support::collections::{HashMap, HashSet}; + use bevy_platform::collections::{HashMap, HashSet}; use core::{ any::TypeId, panic, @@ -4007,12 +4028,7 @@ mod tests { OwningPtr::make(value, |ptr| { // SAFETY: value is valid for the layout of `TestResource` unsafe { - world.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_location")] - panic::Location::caller(), - ); + world.insert_resource_by_id(component_id, ptr, MaybeLocation::caller()); } }); @@ -4056,12 +4072,7 @@ mod tests { OwningPtr::make(value, |ptr| { // SAFETY: value is valid for the component layout unsafe { - world.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_location")] - panic::Location::caller(), - ); + world.insert_resource_by_id(component_id, ptr, MaybeLocation::caller()); } }); @@ -4145,39 +4156,39 @@ mod tests { let bar_id = TypeId::of::(); let baz_id = TypeId::of::(); assert_eq!( - to_type_ids(world.inspect_entity(ent0).collect()), + to_type_ids(world.inspect_entity(ent0).unwrap().collect()), [Some(foo_id), Some(bar_id), Some(baz_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent1).collect()), + to_type_ids(world.inspect_entity(ent1).unwrap().collect()), [Some(foo_id), Some(bar_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent2).collect()), + to_type_ids(world.inspect_entity(ent2).unwrap().collect()), [Some(bar_id), Some(baz_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent3).collect()), + to_type_ids(world.inspect_entity(ent3).unwrap().collect()), [Some(foo_id), Some(baz_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent4).collect()), + to_type_ids(world.inspect_entity(ent4).unwrap().collect()), [Some(foo_id)].into_iter().collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent5).collect()), + 
to_type_ids(world.inspect_entity(ent5).unwrap().collect()), [Some(bar_id)].into_iter().collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent6).collect()), + to_type_ids(world.inspect_entity(ent6).unwrap().collect()), [Some(baz_id)].into_iter().collect::>() ); } @@ -4324,20 +4335,34 @@ mod tests { world.entity_mut(e1).despawn(); - assert_eq!(Err(e1), world.get_entity(e1).map(|_| {})); - assert_eq!(Err(e1), world.get_entity([e1, e2]).map(|_| {})); + assert_eq!( + Err(e1), + world.get_entity(e1).map(|_| {}).map_err(|e| e.entity) + ); + assert_eq!( + Err(e1), + world.get_entity([e1, e2]).map(|_| {}).map_err(|e| e.entity) + ); assert_eq!( Err(e1), world .get_entity(&[e1, e2] /* this is an array not a slice */) .map(|_| {}) + .map_err(|e| e.entity) + ); + assert_eq!( + Err(e1), + world + .get_entity(&vec![e1, e2][..]) + .map(|_| {}) + .map_err(|e| e.entity) ); - assert_eq!(Err(e1), world.get_entity(&vec![e1, e2][..]).map(|_| {})); assert_eq!( Err(e1), world .get_entity(&EntityHashSet::from_iter([e1, e2])) .map(|_| {}) + .map_err(|e| e.entity) ); } @@ -4359,17 +4384,17 @@ mod tests { .is_ok()); assert_eq!( - Err(EntityFetchError::AliasedMutability(e1)), + Err(EntityMutableFetchError::AliasedMutability(e1)), world.get_entity_mut([e1, e2, e1]).map(|_| {}) ); assert_eq!( - Err(EntityFetchError::AliasedMutability(e1)), + Err(EntityMutableFetchError::AliasedMutability(e1)), world .get_entity_mut(&[e1, e2, e1] /* this is an array not a slice */) .map(|_| {}) ); assert_eq!( - Err(EntityFetchError::AliasedMutability(e1)), + Err(EntityMutableFetchError::AliasedMutability(e1)), world.get_entity_mut(&vec![e1, e2, e1][..]).map(|_| {}) ); // Aliased mutability isn't allowed by HashSets @@ -4381,28 +4406,27 @@ mod tests { assert!(matches!( world.get_entity_mut(e1).map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1 + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1 )); assert!(matches!( world.get_entity_mut([e1, e2]).map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e,..)) if e == e1)); + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1)); assert!(matches!( world .get_entity_mut(&[e1, e2] /* this is an array not a slice */) .map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1)); + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1)); assert!(matches!( world.get_entity_mut(&vec![e1, e2][..]).map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1, + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1, )); assert!(matches!( world .get_entity_mut(&EntityHashSet::from_iter([e1, e2])) .map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1)); + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1)); } - #[cfg(feature = "track_location")] #[test] #[track_caller] fn entity_spawn_despawn_tracking() { @@ -4412,23 +4436,23 @@ mod tests { let entity = world.spawn_empty().id(); assert_eq!( world.entities.entity_get_spawned_or_despawned_by(entity), - Some(Location::caller()) + MaybeLocation::new(Some(Location::caller())) ); world.despawn(entity); assert_eq!( world.entities.entity_get_spawned_or_despawned_by(entity), - Some(Location::caller()) + MaybeLocation::new(Some(Location::caller())) ); let new = world.spawn_empty().id(); assert_eq!(entity.index(), new.index()); assert_eq!( world.entities.entity_get_spawned_or_despawned_by(entity), - None + MaybeLocation::new(None) ); world.despawn(new); assert_eq!( 
world.entities.entity_get_spawned_or_despawned_by(entity), - None + MaybeLocation::new(None) ); } @@ -4443,4 +4467,44 @@ mod tests { world.remove_resource::(); assert_eq!(2, world.query::<&Foo>().iter(&world).count()); } + + #[test] + fn entities_and_commands() { + #[derive(Component, PartialEq, Debug)] + struct Foo(u32); + + let mut world = World::new(); + + let eid = world.spawn(Foo(35)).id(); + + let (mut fetcher, mut commands) = world.entities_and_commands(); + let emut = fetcher.get_mut(eid).unwrap(); + commands.entity(eid).despawn(); + assert_eq!(emut.get::().unwrap(), &Foo(35)); + + world.flush(); + + assert!(world.get_entity(eid).is_err()); + } + + #[test] + fn entities_and_commands_deferred() { + #[derive(Component, PartialEq, Debug)] + struct Foo(u32); + + let mut world = World::new(); + + let eid = world.spawn(Foo(1)).id(); + + let mut dworld = DeferredWorld::from(&mut world); + + let (mut fetcher, mut commands) = dworld.entities_and_commands(); + let emut = fetcher.get_mut(eid).unwrap(); + commands.entity(eid).despawn(); + assert_eq!(emut.get::().unwrap(), &Foo(1)); + + world.flush(); + + assert!(world.get_entity(eid).is_err()); + } } diff --git a/crates/bevy_ecs/src/world/reflect.rs b/crates/bevy_ecs/src/world/reflect.rs index 4337416aa2..fdd8b28142 100644 --- a/crates/bevy_ecs/src/world/reflect.rs +++ b/crates/bevy_ecs/src/world/reflect.rs @@ -80,7 +80,7 @@ impl World { let component_name = self .components() .get_name(component_id) - .map(ToString::to_string); + .map(|name| name.to_string()); return Err(GetComponentReflectError::EntityDoesNotHaveComponent { entity, @@ -169,7 +169,7 @@ impl World { let component_name = self .components() .get_name(component_id) - .map(ToString::to_string); + .map(|name| name.to_string()); let Some(comp_mut_untyped) = self.get_mut_by_id(entity, component_id) else { return Err(GetComponentReflectError::EntityDoesNotHaveComponent { diff --git a/crates/bevy_ecs/src/world/spawn_batch.rs b/crates/bevy_ecs/src/world/spawn_batch.rs index cbeaf8f4ad..16bd9bb805 100644 --- a/crates/bevy_ecs/src/world/spawn_batch.rs +++ b/crates/bevy_ecs/src/world/spawn_batch.rs @@ -1,11 +1,10 @@ use crate::{ bundle::{Bundle, BundleSpawner, NoBundleEffect}, + change_detection::MaybeLocation, entity::{Entity, EntitySetIterator}, world::World, }; use core::iter::FusedIterator; -#[cfg(feature = "track_location")] -use core::panic::Location; /// An iterator that spawns a series of entities and returns the [ID](Entity) of /// each spawned entity. 
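The updated assertions above reflect two API changes in this diff: `World::get_entity` now fails with a structured `EntityDoesNotExistError` (whose `entity` field the tests read) rather than a bare `Entity`, and `World::get_entity_mut` reports failures through `EntityMutableFetchError`, distinguishing a missing entity from aliased mutability. A minimal sketch of handling both, assuming the `bevy_ecs::world::error` module path and the public `entity` field that these in-crate tests rely on:

```rust
use bevy_ecs::prelude::*;
use bevy_ecs::world::error::EntityMutableFetchError;

// Report why an entity lookup failed, using the error shapes exercised by the
// tests in this diff.
fn describe_lookup(world: &mut World, e: Entity) {
    match world.get_entity(e) {
        Ok(entity_ref) => println!("found {:?}", entity_ref.id()),
        // `entity` is the id that could not be resolved.
        Err(err) => println!("no such entity: {:?}", err.entity),
    }

    // Requesting the same entity mutably more than once is reported separately
    // from a missing entity.
    match world.get_entity_mut([e, e]) {
        Ok(_) => {}
        Err(EntityMutableFetchError::AliasedMutability(dup)) => {
            println!("{dup:?} was requested mutably more than once");
        }
        Err(err) => println!("mutable lookup failed: {err}"),
    }
}
```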
@@ -18,8 +17,7 @@ where { inner: I, spawner: BundleSpawner<'w>, - #[cfg(feature = "track_location")] - caller: &'static Location<'static>, + caller: MaybeLocation, } impl<'w, I> SpawnBatchIter<'w, I> @@ -29,11 +27,7 @@ where { #[inline] #[track_caller] - pub(crate) fn new( - world: &'w mut World, - iter: I, - #[cfg(feature = "track_location")] caller: &'static Location, - ) -> Self { + pub(crate) fn new(world: &'w mut World, iter: I, caller: MaybeLocation) -> Self { // Ensure all entity allocations are accounted for so `self.entities` can realloc if // necessary world.flush(); @@ -50,7 +44,6 @@ where Self { inner: iter, spawner, - #[cfg(feature = "track_location")] caller, } } @@ -80,17 +73,7 @@ where fn next(&mut self) -> Option { let bundle = self.inner.next()?; // SAFETY: bundle matches spawner type - unsafe { - Some( - self.spawner - .spawn( - bundle, - #[cfg(feature = "track_location")] - self.caller, - ) - .0, - ) - } + unsafe { Some(self.spawner.spawn(bundle, self.caller).0) } } fn size_hint(&self) -> (usize, Option) { diff --git a/crates/bevy_ecs/src/world/unsafe_world_cell.rs b/crates/bevy_ecs/src/world/unsafe_world_cell.rs index 5ad2a3504e..b46b4a154b 100644 --- a/crates/bevy_ecs/src/world/unsafe_world_cell.rs +++ b/crates/bevy_ecs/src/world/unsafe_world_cell.rs @@ -4,9 +4,9 @@ use super::{Mut, Ref, World, WorldId}; use crate::{ archetype::{Archetype, Archetypes}, bundle::Bundles, - change_detection::{MaybeUnsafeCellLocation, MutUntyped, Ticks, TicksMut}, + change_detection::{MaybeLocation, MutUntyped, Ticks, TicksMut}, component::{ComponentId, ComponentTicks, Components, Mutable, StorageType, Tick, TickCells}, - entity::{Entities, Entity, EntityBorrow, EntityLocation}, + entity::{ContainsEntity, Entities, Entity, EntityDoesNotExistError, EntityLocation}, observer::Observers, prelude::Component, query::{DebugCheckedUnwrap, ReadOnlyQueryData}, @@ -15,14 +15,11 @@ use crate::{ storage::{ComponentSparseSet, Storages, Table}, world::RawCommandQueue, }; -use bevy_platform_support::sync::atomic::Ordering; -use bevy_ptr::Ptr; -use core::{any::TypeId, cell::UnsafeCell, fmt::Debug, marker::PhantomData, ptr}; +use bevy_platform::sync::atomic::Ordering; +use bevy_ptr::{Ptr, UnsafeCellDeref}; +use core::{any::TypeId, cell::UnsafeCell, fmt::Debug, marker::PhantomData, panic::Location, ptr}; use thiserror::Error; -#[cfg(feature = "track_location")] -use {bevy_ptr::UnsafeCellDeref, core::panic::Location}; - /// Variant of the [`World`] where resource and component accesses take `&self`, and the responsibility to avoid /// aliasing violations are given to the caller instead of being checked at compile-time by rust's unique XOR shared rule. /// @@ -142,6 +139,12 @@ impl<'w> UnsafeWorldCell<'w> { /// Gets a mutable reference to the [`World`] this [`UnsafeWorldCell`] belongs to. /// This is an incredibly error-prone operation and is only valid in a small number of circumstances. /// + /// Calling this method implies mutable access to the *whole* world (see first point on safety section + /// below), which includes all entities, components, and resources. Notably, calling this on + /// [`WorldQuery::init_fetch`](crate::query::WorldQuery::init_fetch) and + /// [`SystemParam::get_param`](crate::system::SystemParam::get_param) are most likely *unsound* unless + /// you can prove that the underlying [`World`] is exclusive, which in normal circumstances is not. 
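This `SpawnBatchIter` change, together with the earlier `insert_resource_by_id` and resource-iteration hunks, is part of the same migration: the `#[cfg(feature = "track_location")]`-gated `&'static Location<'static>` parameters and fields become an always-present `MaybeLocation`, so call sites no longer carry cfg attributes. A rough sketch of the resulting call-site shape, assuming `MaybeLocation::caller()` and `MaybeLocation::map` behave as they are used in the hunks above (the closure only runs when `track_location` is enabled):

```rust
use bevy_ecs::change_detection::MaybeLocation;

// Hypothetical helper mirroring the new parameter shape: callers always pass a
// `MaybeLocation`, with no cfg attributes at the call site.
fn record_spawn(caller: MaybeLocation) {
    // With `track_location` disabled this closure never runs; otherwise `loc` is
    // the `core::panic::Location` captured at the call site.
    let _ = caller.map(|loc| println!("spawned from {loc}"));
}

#[track_caller]
fn spawn_something() {
    record_spawn(MaybeLocation::caller());
}

fn main() {
    spawn_something();
}
```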
+ /// /// # Safety /// - `self` must have been obtained from a call to [`World::as_unsafe_world_cell`] /// (*not* `as_unsafe_world_cell_readonly` or any other method of construction that @@ -354,9 +357,15 @@ impl<'w> UnsafeWorldCell<'w> { /// Retrieves an [`UnsafeEntityCell`] that exposes read and write operations for the given `entity`. /// Similar to the [`UnsafeWorldCell`], you are in charge of making sure that no aliasing rules are violated. #[inline] - pub fn get_entity(self, entity: Entity) -> Option> { - let location = self.entities().get(entity)?; - Some(UnsafeEntityCell::new(self, entity, location)) + pub fn get_entity( + self, + entity: Entity, + ) -> Result, EntityDoesNotExistError> { + let location = self + .entities() + .get(entity) + .ok_or(EntityDoesNotExistError::new(entity, self.entities()))?; + Ok(UnsafeEntityCell::new(self, entity, location)) } /// Gets a reference to the resource of the given type if it exists @@ -389,7 +398,7 @@ impl<'w> UnsafeWorldCell<'w> { // SAFETY: caller ensures `self` has permission to access the resource // caller also ensure that no mutable reference to the resource exists - let (ptr, ticks, _caller) = unsafe { self.get_resource_with_ticks(component_id)? }; + let (ptr, ticks, caller) = unsafe { self.get_resource_with_ticks(component_id)? }; // SAFETY: `component_id` was obtained from the type ID of `R` let value = unsafe { ptr.deref::() }; @@ -399,13 +408,11 @@ impl<'w> UnsafeWorldCell<'w> { unsafe { Ticks::from_tick_cells(ticks, self.last_change_tick(), self.change_tick()) }; // SAFETY: caller ensures that no mutable reference to the resource exists - #[cfg(feature = "track_location")] - let caller = unsafe { _caller.deref() }; + let caller = caller.map(|caller| unsafe { caller.deref() }); Some(Ref { value, ticks, - #[cfg(feature = "track_location")] changed_by: caller, }) } @@ -512,7 +519,7 @@ impl<'w> UnsafeWorldCell<'w> { self.assert_allows_mutable_access(); // SAFETY: we only access data that the caller has ensured is unaliased and `self` // has permission to access. - let (ptr, ticks, _caller) = unsafe { self.storages() } + let (ptr, ticks, caller) = unsafe { self.storages() } .resources .get(component_id)? .get_with_ticks()?; @@ -530,11 +537,10 @@ impl<'w> UnsafeWorldCell<'w> { // - caller ensures that the resource is unaliased value: unsafe { ptr.assert_unique() }, ticks, - #[cfg(feature = "track_location")] // SAFETY: // - caller ensures that `self` has permission to access the resource // - caller ensures that the resource is unaliased - changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }) } @@ -581,7 +587,7 @@ impl<'w> UnsafeWorldCell<'w> { let change_tick = self.change_tick(); // SAFETY: we only access data that the caller has ensured is unaliased and `self` // has permission to access. - let (ptr, ticks, _caller) = unsafe { self.storages() } + let (ptr, ticks, caller) = unsafe { self.storages() } .non_send_resources .get(component_id)? .get_with_ticks()?; @@ -596,9 +602,8 @@ impl<'w> UnsafeWorldCell<'w> { // SAFETY: This function has exclusive access to the world so nothing aliases `ptr`. 
value: unsafe { ptr.assert_unique() }, ticks, - #[cfg(feature = "track_location")] // SAFETY: This function has exclusive access to the world - changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }) } @@ -611,7 +616,11 @@ impl<'w> UnsafeWorldCell<'w> { pub(crate) unsafe fn get_resource_with_ticks( self, component_id: ComponentId, - ) -> Option<(Ptr<'w>, TickCells<'w>, MaybeUnsafeCellLocation<'w>)> { + ) -> Option<( + Ptr<'w>, + TickCells<'w>, + MaybeLocation<&'w UnsafeCell<&'static Location<'static>>>, + )> { // SAFETY: // - caller ensures there is no `&mut World` // - caller ensures there are no mutable borrows of this resource @@ -634,7 +643,11 @@ impl<'w> UnsafeWorldCell<'w> { pub(crate) unsafe fn get_non_send_with_ticks( self, component_id: ComponentId, - ) -> Option<(Ptr<'w>, TickCells<'w>, MaybeUnsafeCellLocation<'w>)> { + ) -> Option<( + Ptr<'w>, + TickCells<'w>, + MaybeLocation<&'w UnsafeCell<&'static Location<'static>>>, + )> { // SAFETY: // - caller ensures there is no `&mut World` // - caller ensures there are no mutable borrows of this resource @@ -664,7 +677,9 @@ impl<'w> UnsafeWorldCell<'w> { pub(crate) unsafe fn increment_trigger_id(self) { self.assert_allows_mutable_access(); // SAFETY: Caller ensure there are no outstanding references - unsafe { (*self.ptr).last_trigger_id += 1 } + unsafe { + (*self.ptr).last_trigger_id = (*self.ptr).last_trigger_id.wrapping_add(1); + } } } @@ -741,7 +756,7 @@ impl<'w> UnsafeEntityCell<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(self, component_id: ComponentId) -> bool { self.archetype().contains(component_id) @@ -808,12 +823,11 @@ impl<'w> UnsafeEntityCell<'w> { self.entity, self.location, ) - .map(|(value, cells, _caller)| Ref { + .map(|(value, cells, caller)| Ref { // SAFETY: returned component is of type T value: value.deref::(), ticks: Ticks::from_tick_cells(cells, last_change_tick, change_tick), - #[cfg(feature = "track_location")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), }) } } @@ -930,12 +944,11 @@ impl<'w> UnsafeEntityCell<'w> { self.entity, self.location, ) - .map(|(value, cells, _caller)| Mut { + .map(|(value, cells, caller)| Mut { // SAFETY: returned component is of type T value: value.assert_unique().deref_mut::(), ticks: TicksMut::from_tick_cells(cells, last_change_tick, change_tick), - #[cfg(feature = "track_location")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), }) } } @@ -1054,7 +1067,7 @@ impl<'w> UnsafeEntityCell<'w> { self.entity, self.location, ) - .map(|(value, cells, _caller)| MutUntyped { + .map(|(value, cells, caller)| MutUntyped { // SAFETY: world access validated by caller and ties world lifetime to `MutUntyped` lifetime value: value.assert_unique(), ticks: TicksMut::from_tick_cells( @@ -1062,20 +1075,66 @@ impl<'w> UnsafeEntityCell<'w> { self.world.last_change_tick(), self.world.change_tick(), ), - #[cfg(feature = "track_location")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), + }) + .ok_or(GetEntityMutByIdError::ComponentNotFound) + } + } + + /// Retrieves a mutable untyped reference to the given `entity`'s [`Component`] of the given [`ComponentId`]. 
+ /// Returns `None` if the `entity` does not have a [`Component`] of the given type. + /// This method assumes the [`Component`] is mutable, skipping that check. + /// + /// **You should prefer to use the typed API [`UnsafeEntityCell::get_mut_assume_mutable`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the [`UnsafeEntityCell`] has permission to access the component mutably + /// - no other references to the component exist at the same time + /// - the component `T` is mutable + #[inline] + pub unsafe fn get_mut_assume_mutable_by_id( + self, + component_id: ComponentId, + ) -> Result, GetEntityMutByIdError> { + self.world.assert_allows_mutable_access(); + + let info = self + .world + .components() + .get_info(component_id) + .ok_or(GetEntityMutByIdError::InfoNotFound)?; + + // SAFETY: entity_location is valid, component_id is valid as checked by the line above + unsafe { + get_component_and_ticks( + self.world, + component_id, + info.storage_type(), + self.entity, + self.location, + ) + .map(|(value, cells, caller)| MutUntyped { + // SAFETY: world access validated by caller and ties world lifetime to `MutUntyped` lifetime + value: value.assert_unique(), + ticks: TicksMut::from_tick_cells( + cells, + self.world.last_change_tick(), + self.world.change_tick(), + ), + changed_by: caller.map(|caller| caller.deref_mut()), }) .ok_or(GetEntityMutByIdError::ComponentNotFound) } } /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_location")] - pub fn spawned_by(self) -> &'static Location<'static> { + pub fn spawned_by(self) -> MaybeLocation { self.world() .entities() .entity_get_spawned_or_despawned_by(self.entity) - .unwrap() + .map(|o| o.unwrap()) } } @@ -1122,7 +1181,7 @@ impl<'w> UnsafeWorldCell<'w> { /// /// # Safety /// - `location` must refer to an archetype that contains `entity` -/// the archetype +/// the archetype /// - `component_id` must be valid /// - `storage_type` must accurately reflect where the components for `component_id` are stored. /// - the caller must ensure that no aliasing rules are violated @@ -1159,7 +1218,11 @@ unsafe fn get_component_and_ticks( storage_type: StorageType, entity: Entity, location: EntityLocation, -) -> Option<(Ptr<'_>, TickCells<'_>, MaybeUnsafeCellLocation<'_>)> { +) -> Option<( + Ptr<'_>, + TickCells<'_>, + MaybeLocation<&UnsafeCell<&'static Location<'static>>>, +)> { match storage_type { StorageType::Table => { let table = world.fetch_table(location)?; @@ -1175,12 +1238,9 @@ unsafe fn get_component_and_ticks( .get_changed_tick(component_id, location.table_row) .debug_checked_unwrap(), }, - #[cfg(feature = "track_location")] table .get_changed_by(component_id, location.table_row) - .debug_checked_unwrap(), - #[cfg(not(feature = "track_location"))] - (), + .map(|changed_by| changed_by.debug_checked_unwrap()), )) } StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get_with_ticks(entity), @@ -1191,7 +1251,7 @@ unsafe fn get_component_and_ticks( /// /// # Safety /// - `location` must refer to an archetype that contains `entity` -/// the archetype +/// the archetype /// - `component_id` must be valid /// - `storage_type` must accurately reflect where the components for `component_id` are stored. 
/// - the caller must ensure that no aliasing rules are violated @@ -1213,7 +1273,7 @@ unsafe fn get_ticks( } } -impl EntityBorrow for UnsafeEntityCell<'_> { +impl ContainsEntity for UnsafeEntityCell<'_> { fn entity(&self) -> Entity { self.id() } diff --git a/crates/bevy_encase_derive/Cargo.toml b/crates/bevy_encase_derive/Cargo.toml index f35c44db3d..b2f1b92d82 100644 --- a/crates/bevy_encase_derive/Cargo.toml +++ b/crates/bevy_encase_derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_encase_derive" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Bevy derive macro for encase" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_encase_derive/src/lib.rs b/crates/bevy_encase_derive/src/lib.rs index eccfcdd01d..15fbdca6a8 100644 --- a/crates/bevy_encase_derive/src/lib.rs +++ b/crates/bevy_encase_derive/src/lib.rs @@ -14,7 +14,7 @@ const ENCASE: &str = "encase"; fn bevy_encase_path() -> syn::Path { let bevy_manifest = BevyManifest::shared(); bevy_manifest - .get_subcrate("render") + .maybe_get_path("bevy_render") .map(|bevy_render_path| { let mut segments = bevy_render_path.segments; segments.push(BevyManifest::parse_str("render_resource")); @@ -31,7 +31,7 @@ fn bevy_encase_path() -> syn::Path { segments, } }) - .unwrap_or_else(|_err| bevy_manifest.get_path(ENCASE)) + .unwrap_or_else(|| bevy_manifest.get_path(ENCASE)) } implement!(bevy_encase_path()); diff --git a/crates/bevy_gilrs/Cargo.toml b/crates/bevy_gilrs/Cargo.toml index 9b3996c05f..864df285d9 100644 --- a/crates/bevy_gilrs/Cargo.toml +++ b/crates/bevy_gilrs/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gilrs" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Gamepad system made using Gilrs for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -15,7 +15,7 @@ bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_input = { path = "../bevy_input", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", ] } diff --git a/crates/bevy_gilrs/src/gilrs_system.rs b/crates/bevy_gilrs/src/gilrs_system.rs index 05f9aa02e9..69a608a8b8 100644 --- a/crates/bevy_gilrs/src/gilrs_system.rs +++ b/crates/bevy_gilrs/src/gilrs_system.rs @@ -4,8 +4,6 @@ use crate::{ }; use bevy_ecs::event::EventWriter; use bevy_ecs::prelude::Commands; -#[cfg(target_arch = "wasm32")] -use bevy_ecs::system::NonSendMut; use bevy_ecs::system::ResMut; use bevy_input::gamepad::{ GamepadConnection, GamepadConnectionEvent, RawGamepadAxisChangedEvent, @@ -15,101 +13,103 @@ use gilrs::{ev::filter::axis_dpad_to_button, EventType, Filter}; pub fn gilrs_event_startup_system( mut commands: Commands, - #[cfg(target_arch = "wasm32")] mut gilrs: NonSendMut, - #[cfg(not(target_arch = "wasm32"))] mut gilrs: ResMut, + mut gilrs: ResMut, mut gamepads: ResMut, mut events: EventWriter, ) { - for (id, gamepad) in gilrs.0.get().gamepads() { - // Create entity and add to mapping - let entity = commands.spawn_empty().id(); - gamepads.id_to_entity.insert(id, entity); - gamepads.entity_to_id.insert(entity, id); - - events.send(GamepadConnectionEvent { - gamepad: entity, - connection: 
GamepadConnection::Connected { - name: gamepad.name().to_string(), - vendor_id: gamepad.vendor_id(), - product_id: gamepad.product_id(), - }, - }); - } + gilrs.with(|gilrs| { + for (id, gamepad) in gilrs.gamepads() { + // Create entity and add to mapping + let entity = commands.spawn_empty().id(); + gamepads.id_to_entity.insert(id, entity); + gamepads.entity_to_id.insert(entity, id); + events.write(GamepadConnectionEvent { + gamepad: entity, + connection: GamepadConnection::Connected { + name: gamepad.name().to_string(), + vendor_id: gamepad.vendor_id(), + product_id: gamepad.product_id(), + }, + }); + } + }); } pub fn gilrs_event_system( mut commands: Commands, - #[cfg(target_arch = "wasm32")] mut gilrs: NonSendMut, - #[cfg(not(target_arch = "wasm32"))] mut gilrs: ResMut, + mut gilrs: ResMut, mut gamepads: ResMut, mut events: EventWriter, mut connection_events: EventWriter, mut button_events: EventWriter, mut axis_event: EventWriter, ) { - let gilrs = gilrs.0.get(); - while let Some(gilrs_event) = gilrs.next_event().filter_ev(&axis_dpad_to_button, gilrs) { - gilrs.update(&gilrs_event); - match gilrs_event.event { - EventType::Connected => { - let pad = gilrs.gamepad(gilrs_event.id); - let entity = gamepads.get_entity(gilrs_event.id).unwrap_or_else(|| { - let entity = commands.spawn_empty().id(); - gamepads.id_to_entity.insert(gilrs_event.id, entity); - gamepads.entity_to_id.insert(entity, gilrs_event.id); - entity - }); + gilrs.with(|gilrs| { + while let Some(gilrs_event) = gilrs.next_event().filter_ev(&axis_dpad_to_button, gilrs) { + gilrs.update(&gilrs_event); + match gilrs_event.event { + EventType::Connected => { + let pad = gilrs.gamepad(gilrs_event.id); + let entity = gamepads.get_entity(gilrs_event.id).unwrap_or_else(|| { + let entity = commands.spawn_empty().id(); + gamepads.id_to_entity.insert(gilrs_event.id, entity); + gamepads.entity_to_id.insert(entity, gilrs_event.id); + entity + }); - let event = GamepadConnectionEvent::new( - entity, - GamepadConnection::Connected { - name: pad.name().to_string(), - vendor_id: pad.vendor_id(), - product_id: pad.product_id(), - }, - ); - - events.send(event.clone().into()); - connection_events.send(event); - } - EventType::Disconnected => { - let gamepad = gamepads - .id_to_entity - .get(&gilrs_event.id) - .copied() - .expect("mapping should exist from connection"); - let event = GamepadConnectionEvent::new(gamepad, GamepadConnection::Disconnected); - events.send(event.clone().into()); - connection_events.send(event); - } - EventType::ButtonChanged(gilrs_button, raw_value, _) => { - let Some(button) = convert_button(gilrs_button) else { - continue; - }; - let gamepad = gamepads - .id_to_entity - .get(&gilrs_event.id) - .copied() - .expect("mapping should exist from connection"); - events.send(RawGamepadButtonChangedEvent::new(gamepad, button, raw_value).into()); - button_events.send(RawGamepadButtonChangedEvent::new( - gamepad, button, raw_value, - )); - } - EventType::AxisChanged(gilrs_axis, raw_value, _) => { - let Some(axis) = convert_axis(gilrs_axis) else { - continue; - }; - let gamepad = gamepads - .id_to_entity - .get(&gilrs_event.id) - .copied() - .expect("mapping should exist from connection"); - events.send(RawGamepadAxisChangedEvent::new(gamepad, axis, raw_value).into()); - axis_event.send(RawGamepadAxisChangedEvent::new(gamepad, axis, raw_value)); - } - _ => (), - }; - } - gilrs.inc(); + let event = GamepadConnectionEvent::new( + entity, + GamepadConnection::Connected { + name: pad.name().to_string(), + vendor_id: 
pad.vendor_id(), + product_id: pad.product_id(), + }, + ); + events.write(event.clone().into()); + connection_events.write(event); + } + EventType::Disconnected => { + let gamepad = gamepads + .id_to_entity + .get(&gilrs_event.id) + .copied() + .expect("mapping should exist from connection"); + let event = + GamepadConnectionEvent::new(gamepad, GamepadConnection::Disconnected); + events.write(event.clone().into()); + connection_events.write(event); + } + EventType::ButtonChanged(gilrs_button, raw_value, _) => { + let Some(button) = convert_button(gilrs_button) else { + continue; + }; + let gamepad = gamepads + .id_to_entity + .get(&gilrs_event.id) + .copied() + .expect("mapping should exist from connection"); + events.write( + RawGamepadButtonChangedEvent::new(gamepad, button, raw_value).into(), + ); + button_events.write(RawGamepadButtonChangedEvent::new( + gamepad, button, raw_value, + )); + } + EventType::AxisChanged(gilrs_axis, raw_value, _) => { + let Some(axis) = convert_axis(gilrs_axis) else { + continue; + }; + let gamepad = gamepads + .id_to_entity + .get(&gilrs_event.id) + .copied() + .expect("mapping should exist from connection"); + events.write(RawGamepadAxisChangedEvent::new(gamepad, axis, raw_value).into()); + axis_event.write(RawGamepadAxisChangedEvent::new(gamepad, axis, raw_value)); + } + _ => (), + }; + } + gilrs.inc(); + }); } diff --git a/crates/bevy_gilrs/src/lib.rs b/crates/bevy_gilrs/src/lib.rs index b9f1443e5b..ce0d5f27f0 100644 --- a/crates/bevy_gilrs/src/lib.rs +++ b/crates/bevy_gilrs/src/lib.rs @@ -14,19 +14,48 @@ mod converter; mod gilrs_system; mod rumble; +#[cfg(not(target_arch = "wasm32"))] +use bevy_utils::synccell::SyncCell; + +#[cfg(target_arch = "wasm32")] +use core::cell::RefCell; + use bevy_app::{App, Plugin, PostUpdate, PreStartup, PreUpdate}; -use bevy_ecs::entity::hash_map::EntityHashMap; +use bevy_ecs::entity::EntityHashMap; use bevy_ecs::prelude::*; use bevy_input::InputSystem; -use bevy_platform_support::collections::HashMap; -use bevy_utils::synccell::SyncCell; +use bevy_platform::collections::HashMap; use gilrs::GilrsBuilder; use gilrs_system::{gilrs_event_startup_system, gilrs_event_system}; use rumble::{play_gilrs_rumble, RunningRumbleEffects}; use tracing::error; -#[cfg_attr(not(target_arch = "wasm32"), derive(Resource))] -pub(crate) struct Gilrs(pub SyncCell); +#[cfg(target_arch = "wasm32")] +thread_local! { + /// Temporary storage of gilrs data to replace usage of `!Send` resources. This will be replaced with proper + /// storage of `!Send` data after issue #17667 is complete. + /// + /// Using a `thread_local!` here relies on the fact that wasm32 can only be single threaded. Previously, we used a + /// `NonSendMut` parameter, which told Bevy that the system was `!Send`, but now with the removal of `!Send` + /// resource/system parameter usage, there is no internal guarantee that the system will run in only one thread, so + /// we need to rely on the platform to make such a guarantee. + static GILRS: RefCell> = const { RefCell::new(None) }; +} + +#[derive(Resource)] +pub(crate) struct Gilrs { + #[cfg(not(target_arch = "wasm32"))] + cell: SyncCell, +} +impl Gilrs { + #[inline] + pub fn with(&mut self, f: impl FnOnce(&mut gilrs::Gilrs)) { + #[cfg(target_arch = "wasm32")] + GILRS.with(|g| f(g.borrow_mut().as_mut().expect("GILRS was not initialized"))); + #[cfg(not(target_arch = "wasm32"))] + f(self.cell.get()); + } +} /// A [`resource`](Resource) with the mapping of connected [`gilrs::GamepadId`] and their [`Entity`]. 
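`Gilrs::with` above is now the single entry point to the gilrs context: on wasm32 it borrows the thread-local described in the comment, elsewhere it goes through the `SyncCell`, and the systems earlier in this diff no longer switch between `NonSendMut` and `ResMut` per target. A small sketch of a system written against it; the `Gilrs` wrapper is `pub(crate)`, so this only illustrates the in-crate pattern:

```rust
use bevy_ecs::prelude::*;

use crate::Gilrs; // the pub(crate) wrapper defined above

// Counts connected gamepads through the closure-based accessor; the closure
// receives `&mut gilrs::Gilrs` on every supported platform.
fn log_gamepad_count(mut gilrs: ResMut<Gilrs>) {
    gilrs.with(|gilrs| {
        let count = gilrs.gamepads().count();
        tracing::debug!("{count} gamepad(s) connected");
    });
}
```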
#[derive(Debug, Default, Resource)] @@ -65,10 +94,15 @@ impl Plugin for GilrsPlugin { .build() { Ok(gilrs) => { + let g = Gilrs { + #[cfg(not(target_arch = "wasm32"))] + cell: SyncCell::new(gilrs), + }; #[cfg(target_arch = "wasm32")] - app.insert_non_send_resource(Gilrs(SyncCell::new(gilrs))); - #[cfg(not(target_arch = "wasm32"))] - app.insert_resource(Gilrs(SyncCell::new(gilrs))); + GILRS.with(|g| { + g.replace(Some(gilrs)); + }); + app.insert_resource(g); app.init_resource::(); app.init_resource::() .add_systems(PreStartup, gilrs_event_startup_system) diff --git a/crates/bevy_gilrs/src/rumble.rs b/crates/bevy_gilrs/src/rumble.rs index 53a0c945ce..8f41a3ca22 100644 --- a/crates/bevy_gilrs/src/rumble.rs +++ b/crates/bevy_gilrs/src/rumble.rs @@ -1,10 +1,8 @@ //! Handle user specified rumble request events. use crate::{Gilrs, GilrsGamepads}; use bevy_ecs::prelude::{EventReader, Res, ResMut, Resource}; -#[cfg(target_arch = "wasm32")] -use bevy_ecs::system::NonSendMut; use bevy_input::gamepad::{GamepadRumbleIntensity, GamepadRumbleRequest}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use bevy_time::{Real, Time}; use bevy_utils::synccell::SyncCell; use core::time::Duration; @@ -128,42 +126,42 @@ fn handle_rumble_request( } pub(crate) fn play_gilrs_rumble( time: Res>, - #[cfg(target_arch = "wasm32")] mut gilrs: NonSendMut, - #[cfg(not(target_arch = "wasm32"))] mut gilrs: ResMut, + mut gilrs: ResMut, gamepads: Res, mut requests: EventReader, mut running_rumbles: ResMut, ) { - let gilrs = gilrs.0.get(); - let current_time = time.elapsed(); - // Remove outdated rumble effects. - for rumbles in running_rumbles.rumbles.values_mut() { - // `ff::Effect` uses RAII, dropping = deactivating - rumbles.retain(|RunningRumble { deadline, .. }| *deadline >= current_time); - } - running_rumbles - .rumbles - .retain(|_gamepad, rumbles| !rumbles.is_empty()); + gilrs.with(|gilrs| { + let current_time = time.elapsed(); + // Remove outdated rumble effects. + for rumbles in running_rumbles.rumbles.values_mut() { + // `ff::Effect` uses RAII, dropping = deactivating + rumbles.retain(|RunningRumble { deadline, .. }| *deadline >= current_time); + } + running_rumbles + .rumbles + .retain(|_gamepad, rumbles| !rumbles.is_empty()); - // Add new effects. - for rumble in requests.read().cloned() { - let gamepad = rumble.gamepad(); - match handle_rumble_request(&mut running_rumbles, gilrs, &gamepads, rumble, current_time) { - Ok(()) => {} - Err(RumbleError::GilrsError(err)) => { - if let ff::Error::FfNotSupported(_) = err { - debug!("Tried to rumble {gamepad:?}, but it doesn't support force feedback"); - } else { - warn!( - "Tried to handle rumble request for {gamepad:?} but an error occurred: {err}" - ); + // Add new effects. 
+ for rumble in requests.read().cloned() { + let gamepad = rumble.gamepad(); + match handle_rumble_request(&mut running_rumbles, gilrs, &gamepads, rumble, current_time) { + Ok(()) => {} + Err(RumbleError::GilrsError(err)) => { + if let ff::Error::FfNotSupported(_) = err { + debug!("Tried to rumble {gamepad:?}, but it doesn't support force feedback"); + } else { + warn!( + "Tried to handle rumble request for {gamepad:?} but an error occurred: {err}" + ); + } } - } - Err(RumbleError::GamepadNotFound) => { - warn!("Tried to handle rumble request {gamepad:?} but it doesn't exist!"); - } - }; - } + Err(RumbleError::GamepadNotFound) => { + warn!("Tried to handle rumble request {gamepad:?} but it doesn't exist!"); + } + }; + } + }); } #[cfg(test)] diff --git a/crates/bevy_gizmos/Cargo.toml b/crates/bevy_gizmos/Cargo.toml index 6e51c60944..3a264c6244 100644 --- a/crates/bevy_gizmos/Cargo.toml +++ b/crates/bevy_gizmos/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gizmos" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides gizmos for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_gizmos/macros/Cargo.toml b/crates/bevy_gizmos/macros/Cargo.toml index 3862914d72..b38a3c5374 100644 --- a/crates/bevy_gizmos/macros/Cargo.toml +++ b/crates/bevy_gizmos/macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gizmos_macros" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Derive implementations for bevy_gizmos" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_gizmos/src/aabb.rs b/crates/bevy_gizmos/src/aabb.rs index 605a4c2e88..16dc7ed773 100644 --- a/crates/bevy_gizmos/src/aabb.rs +++ b/crates/bevy_gizmos/src/aabb.rs @@ -7,7 +7,7 @@ use bevy_ecs::{ entity::Entity, query::Without, reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Query, Res}, }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -45,6 +45,7 @@ impl Plugin for AabbGizmoPlugin { } /// The [`GizmoConfigGroup`] used for debug visualizations of [`Aabb`] components on entities #[derive(Clone, Default, Reflect, GizmoConfigGroup)] +#[reflect(Clone, Default)] pub struct AabbGizmoConfigGroup { /// Draws all bounding boxes in the scene when set to `true`. /// diff --git a/crates/bevy_gizmos/src/arcs.rs b/crates/bevy_gizmos/src/arcs.rs index 65f5f67ee7..41647f9fe2 100644 --- a/crates/bevy_gizmos/src/arcs.rs +++ b/crates/bevy_gizmos/src/arcs.rs @@ -136,11 +136,11 @@ where /// /// # Arguments /// - `angle`: sets how much of a circle circumference is passed, e.g. PI is half a circle. This - /// value should be in the range (-2 * PI..=2 * PI) + /// value should be in the range (-2 * PI..=2 * PI) /// - `radius`: distance between the arc and its center point /// - `isometry` defines the translation and rotation of the arc. - /// - the translation specifies the center of the arc - /// - the rotation is counter-clockwise starting from `Vec3::Y` + /// - the translation specifies the center of the arc + /// - the rotation is counter-clockwise starting from `Vec3::Y` /// - `color`: color of the arc /// /// # Builder methods @@ -219,10 +219,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. + /// the points is coincident with `center`, nothing is rendered. 
/// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case #[inline] pub fn short_arc_3d_between( &mut self, @@ -265,10 +265,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. + /// the points is coincident with `center`, nothing is rendered. /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case. + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case. #[inline] pub fn long_arc_3d_between( &mut self, @@ -352,10 +352,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. + /// the points is coincident with `center`, nothing is rendered. /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case #[inline] pub fn short_arc_2d_between( &mut self, @@ -398,10 +398,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. + /// the points is coincident with `center`, nothing is rendered. /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case. + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case. #[inline] pub fn long_arc_2d_between( &mut self, diff --git a/crates/bevy_gizmos/src/config.rs b/crates/bevy_gizmos/src/config.rs index bba3ff284c..973fa1cf0f 100644 --- a/crates/bevy_gizmos/src/config.rs +++ b/crates/bevy_gizmos/src/config.rs @@ -20,6 +20,7 @@ use core::{ /// An enum configuring how line joints will be drawn. #[derive(Debug, Default, Copy, Clone, Reflect, PartialEq, Eq, Hash)] +#[reflect(Default, PartialEq, Hash, Clone)] pub enum GizmoLineJoint { /// Does not draw any line joints. #[default] @@ -37,6 +38,7 @@ pub enum GizmoLineJoint { /// An enum used to configure the style of gizmo lines, similar to CSS line-style #[derive(Copy, Clone, Debug, Default, PartialEq, Reflect)] +#[reflect(Default, PartialEq, Hash, Clone)] #[non_exhaustive] pub enum GizmoLineStyle { /// A solid line without any decorators @@ -83,11 +85,13 @@ pub trait GizmoConfigGroup: Reflect + TypePath + Default {} /// The default gizmo config group. 
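A recurring change across these gizmo hunks is spelling out `#[reflect(...)]` registrations such as `Clone`, `Default`, `PartialEq`, and `Hash` alongside the existing derives. A minimal sketch of the pattern on a hypothetical config type, assuming the usual `bevy_reflect` derive behavior (each listed trait must actually be implemented, and `ReflectDefault` must be in scope for `Default`):

```rust
use bevy_reflect::{std_traits::ReflectDefault, Reflect};

// The `#[reflect(...)]` list registers type data such as `ReflectDefault` so
// reflection-based tooling can default-construct, clone, and compare this type
// without knowing it statically.
#[derive(Clone, Default, PartialEq, Reflect)]
#[reflect(Clone, Default, PartialEq)]
struct ExampleConfig {
    enabled: bool,
    width: f32,
}
```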
#[derive(Default, Reflect, GizmoConfigGroup)] +#[reflect(Default)] pub struct DefaultGizmoConfigGroup; /// Used when the gizmo config group needs to be type-erased. /// Also used for retained gizmos, which can't have a gizmo config group. #[derive(Default, Reflect, GizmoConfigGroup, Debug, Clone)] +#[reflect(Default, Clone)] pub struct ErasedGizmoConfigGroup; /// A [`Resource`] storing [`GizmoConfig`] and [`GizmoConfigGroup`] structs @@ -167,6 +171,7 @@ impl GizmoConfigStore { /// A struct that stores configuration for gizmos. #[derive(Clone, Reflect, Debug)] +#[reflect(Clone, Default)] pub struct GizmoConfig { /// Set to `false` to stop drawing gizmos. /// @@ -208,6 +213,7 @@ impl Default for GizmoConfig { /// A struct that stores configuration for gizmos. #[derive(Clone, Reflect, Debug)] +#[reflect(Clone, Default)] pub struct GizmoLineConfig { /// Line width specified in pixels. /// diff --git a/crates/bevy_gizmos/src/gizmos.rs b/crates/bevy_gizmos/src/gizmos.rs index b1fe363f51..b51dd672fe 100644 --- a/crates/bevy_gizmos/src/gizmos.rs +++ b/crates/bevy_gizmos/src/gizmos.rs @@ -11,11 +11,14 @@ use bevy_color::{Color, LinearRgba}; use bevy_ecs::{ component::Tick, resource::Resource, - system::{Deferred, ReadOnlySystemParam, Res, SystemBuffer, SystemMeta, SystemParam}, + system::{ + Deferred, ReadOnlySystemParam, Res, SystemBuffer, SystemMeta, SystemParam, + SystemParamValidationError, + }, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; use bevy_math::{Isometry2d, Isometry3d, Vec2, Vec3}; -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_transform::TransformPoint; use bevy_utils::default; @@ -222,7 +225,7 @@ where state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegated to existing `SystemParam` implementations. unsafe { GizmosState::::validate_param(&state.state, system_meta, world) } } @@ -274,6 +277,7 @@ where /// Buffer for gizmo vertex data. #[derive(Debug, Clone, Reflect)] +#[reflect(Default)] pub struct GizmoBuffer where Config: GizmoConfigGroup, @@ -284,7 +288,7 @@ where pub(crate) list_colors: Vec, pub(crate) strip_positions: Vec, pub(crate) strip_colors: Vec, - #[reflect(ignore)] + #[reflect(ignore, clone)] pub(crate) marker: PhantomData<(Config, Clear)>, } @@ -820,8 +824,7 @@ where let polymorphic_color: Color = color.into(); let linear_color = LinearRgba::from(polymorphic_color); - self.list_colors - .extend(iter::repeat(linear_color).take(count)); + self.list_colors.extend(iter::repeat_n(linear_color, count)); } #[inline] diff --git a/crates/bevy_gizmos/src/grid.rs b/crates/bevy_gizmos/src/grid.rs index 42742e196c..cdcfc41236 100644 --- a/crates/bevy_gizmos/src/grid.rs +++ b/crates/bevy_gizmos/src/grid.rs @@ -186,10 +186,9 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the grid. 
- /// - the translation specifies the center of the grid - /// - defines the orientation of the grid, by default - /// we assume the grid is contained in a plane parallel - /// to the XY plane + /// - the translation specifies the center of the grid + /// - defines the orientation of the grid, by default we assume the grid is contained in a + /// plane parallel to the XY plane /// - `cell_count`: defines the amount of cells in the x and y axes /// - `spacing`: defines the distance between cells along the x and y axes /// - `color`: color of the grid @@ -241,9 +240,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the grid. - /// - the translation specifies the center of the grid - /// - defines the orientation of the grid, by default - /// we assume the grid is aligned with all axes + /// - the translation specifies the center of the grid + /// - defines the orientation of the grid, by default we assume the grid is aligned with all axes /// - `cell_count`: defines the amount of cells in the x, y and z axes /// - `spacing`: defines the distance between cells along the x, y and z axes /// - `color`: color of the grid @@ -295,9 +293,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the grid. - /// - the translation specifies the center of the grid - /// - defines the orientation of the grid, by default - /// we assume the grid is aligned with all axes + /// - the translation specifies the center of the grid + /// - defines the orientation of the grid, by default we assume the grid is aligned with all axes /// - `cell_count`: defines the amount of cells in the x and y axes /// - `spacing`: defines the distance between cells along the x and y axes /// - `color`: color of the grid diff --git a/crates/bevy_gizmos/src/lib.rs b/crates/bevy_gizmos/src/lib.rs old mode 100644 new mode 100755 index fdc2243233..3cd2c7c404 --- a/crates/bevy_gizmos/src/lib.rs +++ b/crates/bevy_gizmos/src/lib.rs @@ -79,13 +79,12 @@ pub mod prelude { } use bevy_app::{App, FixedFirst, FixedLast, Last, Plugin, RunFixedMainLoop}; -use bevy_asset::{weak_handle, Asset, AssetApp, AssetId, Assets, Handle}; +use bevy_asset::{Asset, AssetApp, Assets, Handle}; use bevy_ecs::{ resource::Resource, - schedule::{IntoSystemConfigs, SystemSet}, + schedule::{IntoScheduleConfigs, SystemSet}, system::{Res, ResMut}, }; -use bevy_math::{Vec3, Vec4}; use bevy_reflect::TypePath; #[cfg(all( @@ -99,6 +98,7 @@ use crate::{config::ErasedGizmoConfigGroup, gizmos::GizmoBuffer}; #[cfg(feature = "bevy_render")] use { crate::retained::extract_linegizmos, + bevy_asset::{weak_handle, AssetId}, bevy_ecs::{ component::Component, entity::Entity, @@ -108,7 +108,7 @@ use { Commands, SystemParamItem, }, }, - bevy_math::{Affine3, Affine3A}, + bevy_math::{Affine3, Affine3A, Vec4}, bevy_render::{ extract_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin}, render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets}, @@ -132,9 +132,9 @@ use { use bevy_render::render_resource::{VertexAttribute, VertexBufferLayout, VertexStepMode}; use bevy_time::Fixed; use bevy_utils::TypeIdMap; -use config::{ - DefaultGizmoConfigGroup, GizmoConfig, GizmoConfigGroup, GizmoConfigStore, GizmoLineJoint, -}; +#[cfg(feature = "bevy_render")] +use config::GizmoLineJoint; +use config::{DefaultGizmoConfigGroup, GizmoConfig, GizmoConfigGroup, GizmoConfigStore}; use core::{any::TypeId, marker::PhantomData, mem}; use gizmos::{GizmoStorage, Swap}; #[cfg(all(feature = "bevy_pbr", 
feature = "bevy_render"))] @@ -503,7 +503,7 @@ struct LineGizmoUniform { line_scale: f32, /// WebGL2 structs must be 16 byte aligned. #[cfg(feature = "webgl")] - _padding: Vec3, + _padding: bevy_math::Vec3, } /// A collection of gizmos. diff --git a/crates/bevy_gizmos/src/light.rs b/crates/bevy_gizmos/src/light.rs index d6df0b7d3b..7f7dadacc2 100644 --- a/crates/bevy_gizmos/src/light.rs +++ b/crates/bevy_gizmos/src/light.rs @@ -14,7 +14,7 @@ use bevy_ecs::{ entity::Entity, query::Without, reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Query, Res}, }; use bevy_math::{ @@ -133,6 +133,7 @@ impl Plugin for LightGizmoPlugin { /// Configures how a color is attributed to a light gizmo. #[derive(Debug, Clone, Copy, Default, Reflect)] +#[reflect(Clone, Default)] pub enum LightGizmoColor { /// User-specified color. Manual(Color), @@ -147,6 +148,7 @@ pub enum LightGizmoColor { /// The [`GizmoConfigGroup`] used to configure the visualization of lights. #[derive(Clone, Reflect, GizmoConfigGroup)] +#[reflect(Clone, Default)] pub struct LightGizmoConfigGroup { /// Draw a gizmo for all lights if true. /// diff --git a/crates/bevy_gizmos/src/pipeline_2d.rs b/crates/bevy_gizmos/src/pipeline_2d.rs index 72a2428ff0..3a43055491 100644 --- a/crates/bevy_gizmos/src/pipeline_2d.rs +++ b/crates/bevy_gizmos/src/pipeline_2d.rs @@ -10,7 +10,7 @@ use bevy_core_pipeline::core_2d::{Transparent2d, CORE_2D_DEPTH_FORMAT}; use bevy_ecs::{ prelude::Entity, resource::Resource, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs}, + schedule::IntoScheduleConfigs, system::{Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -341,6 +341,7 @@ fn queue_line_gizmos_2d( sort_key: FloatOrd(f32::INFINITY), batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, indexed: false, }); } @@ -362,6 +363,7 @@ fn queue_line_gizmos_2d( sort_key: FloatOrd(f32::INFINITY), batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, indexed: false, }); } @@ -421,6 +423,7 @@ fn queue_line_joint_gizmos_2d( sort_key: FloatOrd(f32::INFINITY), batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, indexed: false, }); } diff --git a/crates/bevy_gizmos/src/pipeline_3d.rs b/crates/bevy_gizmos/src/pipeline_3d.rs index f7806b64d6..799793e6cb 100644 --- a/crates/bevy_gizmos/src/pipeline_3d.rs +++ b/crates/bevy_gizmos/src/pipeline_3d.rs @@ -7,6 +7,7 @@ use crate::{ use bevy_app::{App, Plugin}; use bevy_core_pipeline::{ core_3d::{Transparent3d, CORE_3D_DEPTH_FORMAT}, + oit::OrderIndependentTransparencySettings, prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass}, }; @@ -14,7 +15,7 @@ use bevy_ecs::{ prelude::Entity, query::Has, resource::Resource, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs}, + schedule::IntoScheduleConfigs, system::{Query, Res, ResMut}, world::{FromWorld, World}, }; @@ -301,6 +302,7 @@ fn queue_line_gizmos_3d( Has, Has, Has, + Has, ), )>, ) { @@ -314,7 +316,7 @@ fn queue_line_gizmos_3d( view, msaa, render_layers, - (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), + (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass, oit), ) in &views { let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) @@ -343,6 +345,10 @@ fn queue_line_gizmos_3d( view_key |= MeshPipelineKey::DEFERRED_PREPASS; } + if oit { + view_key |= MeshPipelineKey::OIT_ENABLED; + } + for (entity, main_entity, config) 
in &line_gizmos { if !config.render_layers.intersects(render_layers) { continue; diff --git a/crates/bevy_gizmos/src/primitives/dim2.rs b/crates/bevy_gizmos/src/primitives/dim2.rs index d38ba3ab4c..9535c28fbd 100644 --- a/crates/bevy_gizmos/src/primitives/dim2.rs +++ b/crates/bevy_gizmos/src/primitives/dim2.rs @@ -634,9 +634,7 @@ where return; } - let segment = Segment2d::new(self.point1, self.point2) - .rotated(self.isometry.rotation) - .translated(self.isometry.translation); + let segment = Segment2d::new(self.point1, self.point2).transformed(self.isometry); if self.draw_arrow { self.gizmos diff --git a/crates/bevy_gizmos/src/primitives/dim3.rs b/crates/bevy_gizmos/src/primitives/dim3.rs index 31a2274e84..898850ddea 100644 --- a/crates/bevy_gizmos/src/primitives/dim3.rs +++ b/crates/bevy_gizmos/src/primitives/dim3.rs @@ -228,10 +228,7 @@ where return; } - let isometry: Isometry3d = isometry.into(); - let transformed = primitive - .rotated(isometry.rotation) - .translated(isometry.translation.into()); + let transformed = primitive.transformed(isometry); self.line(transformed.point1(), transformed.point2(), color); } } diff --git a/crates/bevy_gizmos/src/retained.rs b/crates/bevy_gizmos/src/retained.rs index 435f417552..88610b9744 100644 --- a/crates/bevy_gizmos/src/retained.rs +++ b/crates/bevy_gizmos/src/retained.rs @@ -3,11 +3,8 @@ use core::ops::{Deref, DerefMut}; use bevy_asset::Handle; -use bevy_ecs::{ - component::{require, Component}, - reflect::ReflectComponent, -}; -use bevy_reflect::Reflect; +use bevy_ecs::{component::Component, reflect::ReflectComponent}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_transform::components::Transform; #[cfg(feature = "bevy_render")] @@ -76,7 +73,7 @@ impl DerefMut for GizmoAsset { /// /// [`Gizmos`]: crate::gizmos::Gizmos #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component)] +#[reflect(Component, Clone, Default)] #[require(Transform)] pub struct Gizmo { /// The handle to the gizmo to draw. diff --git a/crates/bevy_gizmos/src/rounded_box.rs b/crates/bevy_gizmos/src/rounded_box.rs index 6f0df7ac0e..530d4f8617 100644 --- a/crates/bevy_gizmos/src/rounded_box.rs +++ b/crates/bevy_gizmos/src/rounded_box.rs @@ -238,10 +238,9 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the rectangle. - /// - the translation specifies the center of the rectangle - /// - defines orientation of the rectangle, by default we - /// assume the rectangle is contained in a plane parallel - /// to the XY plane. + /// - the translation specifies the center of the rectangle + /// - defines orientation of the rectangle, by default we assume the rectangle is contained in + /// a plane parallel to the XY plane. /// - `size`: defines the size of the rectangle. This refers to the 'outer size', similar to a bounding box. /// - `color`: color of the rectangle /// @@ -249,7 +248,7 @@ where /// /// - The corner radius can be adjusted with the `.corner_radius(...)` method. /// - The resolution of the arcs at each corner (i.e. the level of detail) can be adjusted with the - /// `.arc_resolution(...)` method. + /// `.arc_resolution(...)` method. /// /// # Example /// ``` @@ -293,9 +292,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the rectangle. - /// - the translation specifies the center of the rectangle - /// - defines orientation of the rectangle, by default we - /// assume the rectangle aligned with all axes. 
+ /// - the translation specifies the center of the rectangle + /// - defines orientation of the rectangle, by default we assume the rectangle aligned with all axes. /// - `size`: defines the size of the rectangle. This refers to the 'outer size', similar to a bounding box. /// - `color`: color of the rectangle /// @@ -303,7 +301,7 @@ where /// /// - The corner radius can be adjusted with the `.corner_radius(...)` method. /// - The resolution of the arcs at each corner (i.e. the level of detail) can be adjusted with the - /// `.arc_resolution(...)` method. + /// `.arc_resolution(...)` method. /// /// # Example /// ``` @@ -351,9 +349,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the cuboid. - /// - the translation specifies the center of the cuboid - /// - defines orientation of the cuboid, by default we - /// assume the cuboid aligned with all axes. + /// - the translation specifies the center of the cuboid + /// - defines orientation of the cuboid, by default we assume the cuboid aligned with all axes. /// - `size`: defines the size of the cuboid. This refers to the 'outer size', similar to a bounding box. /// - `color`: color of the cuboid /// @@ -361,7 +358,7 @@ where /// /// - The edge radius can be adjusted with the `.edge_radius(...)` method. /// - The resolution of the arcs at each edge (i.e. the level of detail) can be adjusted with the - /// `.arc_resolution(...)` method. + /// `.arc_resolution(...)` method. /// /// # Example /// ``` diff --git a/crates/bevy_gltf/Cargo.toml b/crates/bevy_gltf/Cargo.toml index b7bca04797..a67ab2276c 100644 --- a/crates/bevy_gltf/Cargo.toml +++ b/crates/bevy_gltf/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gltf" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Bevy Engine GLTF loading" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,13 +9,12 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -dds = ["bevy_render/dds", "bevy_image/dds", "bevy_core_pipeline/dds"] pbr_transmission_textures = ["bevy_pbr/pbr_transmission_textures"] pbr_multi_layer_material_textures = [ "bevy_pbr/pbr_multi_layer_material_textures", ] pbr_anisotropy_texture = ["bevy_pbr/pbr_anisotropy_texture"] -pbr_specular_textures = [] +pbr_specular_textures = ["bevy_pbr/pbr_specular_textures"] [dependencies] # bevy @@ -27,10 +26,9 @@ bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.16.0-dev" } bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_mesh = { path = "../bevy_mesh", version = "0.16.0-dev" } bevy_pbr = { path = "../bevy_pbr", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } bevy_scene = { path = "../bevy_scene", version = "0.16.0-dev", features = [ "bevy_render", @@ -38,7 +36,7 @@ bevy_scene = { path = "../bevy_scene", version = "0.16.0-dev", features = [ bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = 
{ path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", "serialize", ] } @@ -60,7 +58,7 @@ gltf = { version = "1.4.0", default-features = false, features = [ thiserror = { version = "2", default-features = false } base64 = "0.22.0" fixedbitset = "0.5" -itertools = "0.13" +itertools = "0.14" percent-encoding = "2.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1" diff --git a/crates/bevy_gltf/src/assets.rs b/crates/bevy_gltf/src/assets.rs new file mode 100644 index 0000000000..fe3303dd81 --- /dev/null +++ b/crates/bevy_gltf/src/assets.rs @@ -0,0 +1,315 @@ +//! Representation of assets present in a glTF file + +#[cfg(feature = "bevy_animation")] +use bevy_animation::AnimationClip; +use bevy_asset::{Asset, Handle}; +use bevy_ecs::{component::Component, reflect::ReflectComponent}; +use bevy_mesh::{skinning::SkinnedMeshInverseBindposes, Mesh}; +use bevy_pbr::StandardMaterial; +use bevy_platform::collections::HashMap; +use bevy_reflect::{prelude::ReflectDefault, Reflect, TypePath}; +use bevy_scene::Scene; + +use crate::GltfAssetLabel; + +/// Representation of a loaded glTF file. +#[derive(Asset, Debug, TypePath)] +pub struct Gltf { + /// All scenes loaded from the glTF file. + pub scenes: Vec>, + /// Named scenes loaded from the glTF file. + pub named_scenes: HashMap, Handle>, + /// All meshes loaded from the glTF file. + pub meshes: Vec>, + /// Named meshes loaded from the glTF file. + pub named_meshes: HashMap, Handle>, + /// All materials loaded from the glTF file. + pub materials: Vec>, + /// Named materials loaded from the glTF file. + pub named_materials: HashMap, Handle>, + /// All nodes loaded from the glTF file. + pub nodes: Vec>, + /// Named nodes loaded from the glTF file. + pub named_nodes: HashMap, Handle>, + /// All skins loaded from the glTF file. + pub skins: Vec>, + /// Named skins loaded from the glTF file. + pub named_skins: HashMap, Handle>, + /// Default scene to be displayed. + pub default_scene: Option>, + /// All animations loaded from the glTF file. + #[cfg(feature = "bevy_animation")] + pub animations: Vec>, + /// Named animations loaded from the glTF file. + #[cfg(feature = "bevy_animation")] + pub named_animations: HashMap, Handle>, + /// The gltf root of the gltf asset, see . Only has a value when `GltfLoaderSettings::include_source` is true. + pub source: Option, +} + +/// A glTF mesh, which may consist of multiple [`GltfPrimitives`](GltfPrimitive) +/// and an optional [`GltfExtras`]. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfMesh { + /// Index of the mesh inside the scene + pub index: usize, + /// Computed name for a mesh - either a user defined mesh name from gLTF or a generated name from index + pub name: String, + /// Primitives of the glTF mesh. + pub primitives: Vec, + /// Additional data. + pub extras: Option, +} + +impl GltfMesh { + /// Create a mesh extracting name and index from glTF def + pub fn new( + mesh: &gltf::Mesh, + primitives: Vec, + extras: Option, + ) -> Self { + Self { + index: mesh.index(), + name: if let Some(name) = mesh.name() { + name.to_string() + } else { + format!("GltfMesh{}", mesh.index()) + }, + primitives, + extras, + } + } + + /// Subasset label for this mesh within the gLTF parent asset. 
+ pub fn asset_label(&self) -> GltfAssetLabel { + GltfAssetLabel::Mesh(self.index) + } +} + +/// A glTF node with all of its child nodes, its [`GltfMesh`], +/// [`Transform`](bevy_transform::prelude::Transform), its optional [`GltfSkin`] +/// and an optional [`GltfExtras`]. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-node). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfNode { + /// Index of the node inside the scene + pub index: usize, + /// Computed name for a node - either a user defined node name from gLTF or a generated name from index + pub name: String, + /// Direct children of the node. + pub children: Vec>, + /// Mesh of the node. + pub mesh: Option>, + /// Skin of the node. + pub skin: Option>, + /// Local transform. + pub transform: bevy_transform::prelude::Transform, + /// Is this node used as an animation root + #[cfg(feature = "bevy_animation")] + pub is_animation_root: bool, + /// Additional data. + pub extras: Option, +} + +impl GltfNode { + /// Create a node extracting name and index from glTF def + pub fn new( + node: &gltf::Node, + children: Vec>, + mesh: Option>, + transform: bevy_transform::prelude::Transform, + skin: Option>, + extras: Option, + ) -> Self { + Self { + index: node.index(), + name: if let Some(name) = node.name() { + name.to_string() + } else { + format!("GltfNode{}", node.index()) + }, + children, + mesh, + transform, + skin, + #[cfg(feature = "bevy_animation")] + is_animation_root: false, + extras, + } + } + + /// Create a node with animation root mark + #[cfg(feature = "bevy_animation")] + pub fn with_animation_root(self, is_animation_root: bool) -> Self { + Self { + is_animation_root, + ..self + } + } + + /// Subasset label for this node within the gLTF parent asset. + pub fn asset_label(&self) -> GltfAssetLabel { + GltfAssetLabel::Node(self.index) + } +} + +/// Part of a [`GltfMesh`] that consists of a [`Mesh`], an optional [`StandardMaterial`] and [`GltfExtras`]. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh-primitive). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfPrimitive { + /// Index of the primitive inside the mesh + pub index: usize, + /// Index of the parent [`GltfMesh`] of this primitive + pub parent_mesh_index: usize, + /// Computed name for a primitive - either a user defined primitive name from gLTF or a generated name from index + pub name: String, + /// Topology to be rendered. + pub mesh: Handle, + /// Material to apply to the `mesh`. + pub material: Option>, + /// Additional data. + pub extras: Option, + /// Additional data of the `material`. + pub material_extras: Option, +} + +impl GltfPrimitive { + /// Create a primitive extracting name and index from glTF def + pub fn new( + gltf_mesh: &gltf::Mesh, + gltf_primitive: &gltf::Primitive, + mesh: Handle, + material: Option>, + extras: Option, + material_extras: Option, + ) -> Self { + GltfPrimitive { + index: gltf_primitive.index(), + parent_mesh_index: gltf_mesh.index(), + name: { + let mesh_name = gltf_mesh.name().unwrap_or("Mesh"); + if gltf_mesh.primitives().len() > 1 { + format!("{}.{}", mesh_name, gltf_primitive.index()) + } else { + mesh_name.to_string() + } + }, + mesh, + material, + extras, + material_extras, + } + } + + /// Subasset label for this primitive within its parent [`GltfMesh`] within the gLTF parent asset. 
+ pub fn asset_label(&self) -> GltfAssetLabel { + GltfAssetLabel::Primitive { + mesh: self.parent_mesh_index, + primitive: self.index, + } + } +} + +/// A glTF skin with all of its joint nodes, [`SkinnedMeshInversiveBindposes`](bevy_mesh::skinning::SkinnedMeshInverseBindposes) +/// and an optional [`GltfExtras`]. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-skin). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfSkin { + /// Index of the skin inside the scene + pub index: usize, + /// Computed name for a skin - either a user defined skin name from gLTF or a generated name from index + pub name: String, + /// All the nodes that form this skin. + pub joints: Vec>, + /// Inverse-bind matrices of this skin. + pub inverse_bind_matrices: Handle, + /// Additional data. + pub extras: Option, +} + +impl GltfSkin { + /// Create a skin extracting name and index from glTF def + pub fn new( + skin: &gltf::Skin, + joints: Vec>, + inverse_bind_matrices: Handle, + extras: Option, + ) -> Self { + Self { + index: skin.index(), + name: if let Some(name) = skin.name() { + name.to_string() + } else { + format!("GltfSkin{}", skin.index()) + }, + joints, + inverse_bind_matrices, + extras, + } + } + + /// Subasset label for this skin within the gLTF parent asset. + pub fn asset_label(&self) -> GltfAssetLabel { + GltfAssetLabel::Skin(self.index) + } +} + +/// Additional untyped data that can be present on most glTF types at the primitive level. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). +#[derive(Clone, Debug, Reflect, Default, Component)] +#[reflect(Component, Clone, Default, Debug)] +pub struct GltfExtras { + /// Content of the extra data. + pub value: String, +} + +impl From<&serde_json::value::RawValue> for GltfExtras { + fn from(value: &serde_json::value::RawValue) -> Self { + GltfExtras { + value: value.get().to_string(), + } + } +} + +/// Additional untyped data that can be present on most glTF types at the scene level. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). +#[derive(Clone, Debug, Reflect, Default, Component)] +#[reflect(Component, Clone, Default, Debug)] +pub struct GltfSceneExtras { + /// Content of the extra data. + pub value: String, +} + +/// Additional untyped data that can be present on most glTF types at the mesh level. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). +#[derive(Clone, Debug, Reflect, Default, Component)] +#[reflect(Component, Clone, Default, Debug)] +pub struct GltfMeshExtras { + /// Content of the extra data. + pub value: String, +} + +/// Additional untyped data that can be present on most glTF types at the material level. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). +#[derive(Clone, Debug, Reflect, Default, Component)] +#[reflect(Component, Clone, Default, Debug)] +pub struct GltfMaterialExtras { + /// Content of the extra data. + pub value: String, +} + +/// The material name of a glTF primitive. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-material). 
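+///
+/// A query sketch (this assumes, purely as an illustration, that the component is
+/// present on the spawned primitive entities):
+///
+/// ```ignore
+/// fn print_material_names(query: Query<&GltfMaterialName>) {
+///     for material_name in &query {
+///         println!("material: {}", material_name.0);
+///     }
+/// }
+/// ```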
+#[derive(Clone, Debug, Reflect, Default, Component)] +#[reflect(Component, Clone)] +pub struct GltfMaterialName(pub String); diff --git a/crates/bevy_gltf/src/label.rs b/crates/bevy_gltf/src/label.rs new file mode 100644 index 0000000000..b74d5ab2d6 --- /dev/null +++ b/crates/bevy_gltf/src/label.rs @@ -0,0 +1,127 @@ +//! Labels that can be used to load part of a glTF + +use bevy_asset::AssetPath; + +/// Labels that can be used to load part of a glTF +/// +/// You can use [`GltfAssetLabel::from_asset`] to add it to an asset path +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # use bevy_asset::prelude::*; +/// # use bevy_scene::prelude::*; +/// # use bevy_gltf::prelude::*; +/// +/// fn load_gltf_scene(asset_server: Res) { +/// let gltf_scene: Handle = asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); +/// } +/// ``` +/// +/// Or when formatting a string for the path +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # use bevy_asset::prelude::*; +/// # use bevy_scene::prelude::*; +/// # use bevy_gltf::prelude::*; +/// +/// fn load_gltf_scene(asset_server: Res) { +/// let gltf_scene: Handle = asset_server.load(format!("models/FlightHelmet/FlightHelmet.gltf#{}", GltfAssetLabel::Scene(0))); +/// } +/// ``` +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum GltfAssetLabel { + /// `Scene{}`: glTF Scene as a Bevy [`Scene`](bevy_scene::Scene) + Scene(usize), + /// `Node{}`: glTF Node as a [`GltfNode`](crate::GltfNode) + Node(usize), + /// `Mesh{}`: glTF Mesh as a [`GltfMesh`](crate::GltfMesh) + Mesh(usize), + /// `Mesh{}/Primitive{}`: glTF Primitive as a Bevy [`Mesh`](bevy_mesh::Mesh) + Primitive { + /// Index of the mesh for this primitive + mesh: usize, + /// Index of this primitive in its parent mesh + primitive: usize, + }, + /// `Mesh{}/Primitive{}/MorphTargets`: Morph target animation data for a glTF Primitive + /// as a Bevy [`Image`](bevy_image::prelude::Image) + MorphTarget { + /// Index of the mesh for this primitive + mesh: usize, + /// Index of this primitive in its parent mesh + primitive: usize, + }, + /// `Texture{}`: glTF Texture as a Bevy [`Image`](bevy_image::prelude::Image) + Texture(usize), + /// `Material{}`: glTF Material as a Bevy [`StandardMaterial`](bevy_pbr::StandardMaterial) + Material { + /// Index of this material + index: usize, + /// Used to set the [`Face`](bevy_render::render_resource::Face) of the material, + /// useful if it is used with negative scale + is_scale_inverted: bool, + }, + /// `DefaultMaterial`: glTF's default Material as a + /// Bevy [`StandardMaterial`](bevy_pbr::StandardMaterial) + DefaultMaterial, + /// `Animation{}`: glTF Animation as Bevy [`AnimationClip`](bevy_animation::AnimationClip) + Animation(usize), + /// `Skin{}`: glTF mesh skin as [`GltfSkin`](crate::GltfSkin) + Skin(usize), + /// `Skin{}/InverseBindMatrices`: glTF mesh skin matrices as Bevy + /// [`SkinnedMeshInverseBindposes`](bevy_mesh::skinning::SkinnedMeshInverseBindposes) + InverseBindMatrices(usize), +} + +impl core::fmt::Display for GltfAssetLabel { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + GltfAssetLabel::Scene(index) => f.write_str(&format!("Scene{index}")), + GltfAssetLabel::Node(index) => f.write_str(&format!("Node{index}")), + GltfAssetLabel::Mesh(index) => f.write_str(&format!("Mesh{index}")), + GltfAssetLabel::Primitive { mesh, primitive } => { + f.write_str(&format!("Mesh{mesh}/Primitive{primitive}")) + } + GltfAssetLabel::MorphTarget { mesh, primitive } => { + 
f.write_str(&format!("Mesh{mesh}/Primitive{primitive}/MorphTargets")) + } + GltfAssetLabel::Texture(index) => f.write_str(&format!("Texture{index}")), + GltfAssetLabel::Material { + index, + is_scale_inverted, + } => f.write_str(&format!( + "Material{index}{}", + if *is_scale_inverted { + " (inverted)" + } else { + "" + } + )), + GltfAssetLabel::DefaultMaterial => f.write_str("DefaultMaterial"), + GltfAssetLabel::Animation(index) => f.write_str(&format!("Animation{index}")), + GltfAssetLabel::Skin(index) => f.write_str(&format!("Skin{index}")), + GltfAssetLabel::InverseBindMatrices(index) => { + f.write_str(&format!("Skin{index}/InverseBindMatrices")) + } + } + } +} + +impl GltfAssetLabel { + /// Add this label to an asset path + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// # use bevy_asset::prelude::*; + /// # use bevy_scene::prelude::*; + /// # use bevy_gltf::prelude::*; + /// + /// fn load_gltf_scene(asset_server: Res) { + /// let gltf_scene: Handle = asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); + /// } + /// ``` + pub fn from_asset(&self, path: impl Into>) -> AssetPath<'static> { + path.into().with_label(self.to_string()) + } +} diff --git a/crates/bevy_gltf/src/lib.rs b/crates/bevy_gltf/src/lib.rs index 74664b3e37..ebcf49744a 100644 --- a/crates/bevy_gltf/src/lib.rs +++ b/crates/bevy_gltf/src/lib.rs @@ -90,36 +90,31 @@ //! //! You can use [`GltfAssetLabel`] to ensure you are using the correct label. -extern crate alloc; - -#[cfg(feature = "bevy_animation")] -use bevy_animation::AnimationClip; -use bevy_platform_support::collections::HashMap; - +mod assets; +mod label; mod loader; mod vertex_attributes; -pub use loader::*; + +extern crate alloc; + +use bevy_platform::collections::HashMap; use bevy_app::prelude::*; -use bevy_asset::{Asset, AssetApp, AssetPath, Handle}; -use bevy_ecs::{prelude::Component, reflect::ReflectComponent}; +use bevy_asset::AssetApp; use bevy_image::CompressedImageFormats; -use bevy_pbr::StandardMaterial; -use bevy_reflect::{std_traits::ReflectDefault, Reflect, TypePath}; -use bevy_render::{ - mesh::{skinning::SkinnedMeshInverseBindposes, Mesh, MeshVertexAttribute}, - renderer::RenderDevice, -}; -use bevy_scene::Scene; +use bevy_mesh::MeshVertexAttribute; +use bevy_render::renderer::RenderDevice; /// The glTF prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. pub mod prelude { #[doc(hidden)] - pub use crate::{Gltf, GltfAssetLabel, GltfExtras}; + pub use crate::{assets::Gltf, assets::GltfExtras, label::GltfAssetLabel}; } +pub use {assets::*, label::GltfAssetLabel, loader::*}; + /// Adds support for glTF file loading to the app. #[derive(Default)] pub struct GltfPlugin { @@ -168,417 +163,3 @@ impl Plugin for GltfPlugin { }); } } - -/// Representation of a loaded glTF file. -#[derive(Asset, Debug, TypePath)] -pub struct Gltf { - /// All scenes loaded from the glTF file. - pub scenes: Vec>, - /// Named scenes loaded from the glTF file. - pub named_scenes: HashMap, Handle>, - /// All meshes loaded from the glTF file. - pub meshes: Vec>, - /// Named meshes loaded from the glTF file. - pub named_meshes: HashMap, Handle>, - /// All materials loaded from the glTF file. - pub materials: Vec>, - /// Named materials loaded from the glTF file. - pub named_materials: HashMap, Handle>, - /// All nodes loaded from the glTF file. - pub nodes: Vec>, - /// Named nodes loaded from the glTF file. 
- pub named_nodes: HashMap, Handle>, - /// All skins loaded from the glTF file. - pub skins: Vec>, - /// Named skins loaded from the glTF file. - pub named_skins: HashMap, Handle>, - /// Default scene to be displayed. - pub default_scene: Option>, - /// All animations loaded from the glTF file. - #[cfg(feature = "bevy_animation")] - pub animations: Vec>, - /// Named animations loaded from the glTF file. - #[cfg(feature = "bevy_animation")] - pub named_animations: HashMap, Handle>, - /// The gltf root of the gltf asset, see . Only has a value when `GltfLoaderSettings::include_source` is true. - pub source: Option, -} - -/// A glTF node with all of its child nodes, its [`GltfMesh`], -/// [`Transform`](bevy_transform::prelude::Transform), its optional [`GltfSkin`] -/// and an optional [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-node). -#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfNode { - /// Index of the node inside the scene - pub index: usize, - /// Computed name for a node - either a user defined node name from gLTF or a generated name from index - pub name: String, - /// Direct children of the node. - pub children: Vec>, - /// Mesh of the node. - pub mesh: Option>, - /// Skin of the node. - pub skin: Option>, - /// Local transform. - pub transform: bevy_transform::prelude::Transform, - /// Is this node used as an animation root - #[cfg(feature = "bevy_animation")] - pub is_animation_root: bool, - /// Additional data. - pub extras: Option, -} - -impl GltfNode { - /// Create a node extracting name and index from glTF def - pub fn new( - node: &gltf::Node, - children: Vec>, - mesh: Option>, - transform: bevy_transform::prelude::Transform, - skin: Option>, - extras: Option, - ) -> Self { - Self { - index: node.index(), - name: if let Some(name) = node.name() { - name.to_string() - } else { - format!("GltfNode{}", node.index()) - }, - children, - mesh, - transform, - skin, - #[cfg(feature = "bevy_animation")] - is_animation_root: false, - extras, - } - } - - /// Create a node with animation root mark - #[cfg(feature = "bevy_animation")] - pub fn with_animation_root(self, is_animation_root: bool) -> Self { - Self { - is_animation_root, - ..self - } - } - - /// Subasset label for this node within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Node(self.index) - } -} - -/// A glTF skin with all of its joint nodes, [`SkinnedMeshInversiveBindposes`](bevy_render::mesh::skinning::SkinnedMeshInverseBindposes) -/// and an optional [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-skin). -#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfSkin { - /// Index of the skin inside the scene - pub index: usize, - /// Computed name for a skin - either a user defined skin name from gLTF or a generated name from index - pub name: String, - /// All the nodes that form this skin. - pub joints: Vec>, - /// Inverse-bind matrices of this skin. - pub inverse_bind_matrices: Handle, - /// Additional data. 
- pub extras: Option, -} - -impl GltfSkin { - /// Create a skin extracting name and index from glTF def - pub fn new( - skin: &gltf::Skin, - joints: Vec>, - inverse_bind_matrices: Handle, - extras: Option, - ) -> Self { - Self { - index: skin.index(), - name: if let Some(name) = skin.name() { - name.to_string() - } else { - format!("GltfSkin{}", skin.index()) - }, - joints, - inverse_bind_matrices, - extras, - } - } - - /// Subasset label for this skin within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Skin(self.index) - } -} - -/// A glTF mesh, which may consist of multiple [`GltfPrimitives`](GltfPrimitive) -/// and an optional [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh). -#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfMesh { - /// Index of the mesh inside the scene - pub index: usize, - /// Computed name for a mesh - either a user defined mesh name from gLTF or a generated name from index - pub name: String, - /// Primitives of the glTF mesh. - pub primitives: Vec, - /// Additional data. - pub extras: Option, -} - -impl GltfMesh { - /// Create a mesh extracting name and index from glTF def - pub fn new( - mesh: &gltf::Mesh, - primitives: Vec, - extras: Option, - ) -> Self { - Self { - index: mesh.index(), - name: if let Some(name) = mesh.name() { - name.to_string() - } else { - format!("GltfMesh{}", mesh.index()) - }, - primitives, - extras, - } - } - - /// Subasset label for this mesh within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Mesh(self.index) - } -} - -/// Part of a [`GltfMesh`] that consists of a [`Mesh`], an optional [`StandardMaterial`] and [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh-primitive). -#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfPrimitive { - /// Index of the primitive inside the mesh - pub index: usize, - /// Index of the parent [`GltfMesh`] of this primitive - pub parent_mesh_index: usize, - /// Computed name for a primitive - either a user defined primitive name from gLTF or a generated name from index - pub name: String, - /// Topology to be rendered. - pub mesh: Handle, - /// Material to apply to the `mesh`. - pub material: Option>, - /// Additional data. - pub extras: Option, - /// Additional data of the `material`. - pub material_extras: Option, -} - -impl GltfPrimitive { - /// Create a primitive extracting name and index from glTF def - pub fn new( - gltf_mesh: &gltf::Mesh, - gltf_primitive: &gltf::Primitive, - mesh: Handle, - material: Option>, - extras: Option, - material_extras: Option, - ) -> Self { - GltfPrimitive { - index: gltf_primitive.index(), - parent_mesh_index: gltf_mesh.index(), - name: { - let mesh_name = gltf_mesh.name().unwrap_or("Mesh"); - if gltf_mesh.primitives().len() > 1 { - format!("{}.{}", mesh_name, gltf_primitive.index()) - } else { - mesh_name.to_string() - } - }, - mesh, - material, - extras, - material_extras, - } - } - - /// Subasset label for this primitive within its parent [`GltfMesh`] within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Primitive { - mesh: self.parent_mesh_index, - primitive: self.index, - } - } -} - -/// Additional untyped data that can be present on most glTF types at the primitive level. 
-/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfExtras { - /// Content of the extra data. - pub value: String, -} - -/// Additional untyped data that can be present on most glTF types at the scene level. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfSceneExtras { - /// Content of the extra data. - pub value: String, -} - -/// Additional untyped data that can be present on most glTF types at the mesh level. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfMeshExtras { - /// Content of the extra data. - pub value: String, -} - -/// Additional untyped data that can be present on most glTF types at the material level. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfMaterialExtras { - /// Content of the extra data. - pub value: String, -} - -/// The material name of a glTF primitive. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-material). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component)] -pub struct GltfMaterialName(pub String); - -/// Labels that can be used to load part of a glTF -/// -/// You can use [`GltfAssetLabel::from_asset`] to add it to an asset path -/// -/// ``` -/// # use bevy_ecs::prelude::*; -/// # use bevy_asset::prelude::*; -/// # use bevy_scene::prelude::*; -/// # use bevy_gltf::prelude::*; -/// -/// fn load_gltf_scene(asset_server: Res) { -/// let gltf_scene: Handle = asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); -/// } -/// ``` -/// -/// Or when formatting a string for the path -/// -/// ``` -/// # use bevy_ecs::prelude::*; -/// # use bevy_asset::prelude::*; -/// # use bevy_scene::prelude::*; -/// # use bevy_gltf::prelude::*; -/// -/// fn load_gltf_scene(asset_server: Res) { -/// let gltf_scene: Handle = asset_server.load(format!("models/FlightHelmet/FlightHelmet.gltf#{}", GltfAssetLabel::Scene(0))); -/// } -/// ``` -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum GltfAssetLabel { - /// `Scene{}`: glTF Scene as a Bevy `Scene` - Scene(usize), - /// `Node{}`: glTF Node as a `GltfNode` - Node(usize), - /// `Mesh{}`: glTF Mesh as a `GltfMesh` - Mesh(usize), - /// `Mesh{}/Primitive{}`: glTF Primitive as a Bevy `Mesh` - Primitive { - /// Index of the mesh for this primitive - mesh: usize, - /// Index of this primitive in its parent mesh - primitive: usize, - }, - /// `Mesh{}/Primitive{}/MorphTargets`: Morph target animation data for a glTF Primitive - MorphTarget { - /// Index of the mesh for this primitive - mesh: usize, - /// Index of this primitive in its parent mesh - primitive: usize, - }, - /// `Texture{}`: glTF Texture as a Bevy `Image` - Texture(usize), - /// `Material{}`: glTF Material as a Bevy `StandardMaterial` - Material { - /// Index of this 
material - index: usize, - /// Used to set the [`Face`](bevy_render::render_resource::Face) of the material, useful if it is used with negative scale - is_scale_inverted: bool, - }, - /// `DefaultMaterial`: as above, if the glTF file contains a default material with no index - DefaultMaterial, - /// `Animation{}`: glTF Animation as Bevy `AnimationClip` - Animation(usize), - /// `Skin{}`: glTF mesh skin as `GltfSkin` - Skin(usize), - /// `Skin{}/InverseBindMatrices`: glTF mesh skin matrices as Bevy `SkinnedMeshInverseBindposes` - InverseBindMatrices(usize), -} - -impl core::fmt::Display for GltfAssetLabel { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - GltfAssetLabel::Scene(index) => f.write_str(&format!("Scene{index}")), - GltfAssetLabel::Node(index) => f.write_str(&format!("Node{index}")), - GltfAssetLabel::Mesh(index) => f.write_str(&format!("Mesh{index}")), - GltfAssetLabel::Primitive { mesh, primitive } => { - f.write_str(&format!("Mesh{mesh}/Primitive{primitive}")) - } - GltfAssetLabel::MorphTarget { mesh, primitive } => { - f.write_str(&format!("Mesh{mesh}/Primitive{primitive}/MorphTargets")) - } - GltfAssetLabel::Texture(index) => f.write_str(&format!("Texture{index}")), - GltfAssetLabel::Material { - index, - is_scale_inverted, - } => f.write_str(&format!( - "Material{index}{}", - if *is_scale_inverted { - " (inverted)" - } else { - "" - } - )), - GltfAssetLabel::DefaultMaterial => f.write_str("DefaultMaterial"), - GltfAssetLabel::Animation(index) => f.write_str(&format!("Animation{index}")), - GltfAssetLabel::Skin(index) => f.write_str(&format!("Skin{index}")), - GltfAssetLabel::InverseBindMatrices(index) => { - f.write_str(&format!("Skin{index}/InverseBindMatrices")) - } - } - } -} - -impl GltfAssetLabel { - /// Add this label to an asset path - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # use bevy_asset::prelude::*; - /// # use bevy_scene::prelude::*; - /// # use bevy_gltf::prelude::*; - /// - /// fn load_gltf_scene(asset_server: Res) { - /// let gltf_scene: Handle = asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); - /// } - /// ``` - pub fn from_asset(&self, path: impl Into>) -> AssetPath<'static> { - path.into().with_label(self.to_string()) - } -} diff --git a/crates/bevy_gltf/src/loader/extensions/khr_materials_anisotropy.rs b/crates/bevy_gltf/src/loader/extensions/khr_materials_anisotropy.rs new file mode 100644 index 0000000000..f859cfba84 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/khr_materials_anisotropy.rs @@ -0,0 +1,71 @@ +use bevy_asset::LoadContext; + +use gltf::{Document, Material}; + +use serde_json::Value; + +#[cfg(feature = "pbr_anisotropy_texture")] +use { + crate::loader::gltf_ext::{material::uv_channel, texture::texture_handle_from_info}, + bevy_asset::Handle, + bevy_image::Image, + bevy_pbr::UvChannel, + gltf::json::texture::Info, + serde_json::value, +}; + +/// Parsed data from the `KHR_materials_anisotropy` extension. 
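+///
+/// A call sketch from material conversion code (the surrounding variable names are
+/// illustrative, not taken from the loader):
+///
+/// ```ignore
+/// if let Some(anisotropy) = AnisotropyExtension::parse(load_context, document, material) {
+///     // `anisotropy_strength` and `anisotropy_rotation` are optional factors from the JSON.
+/// }
+/// ```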
+/// +/// See the specification: +/// +#[derive(Default)] +pub(crate) struct AnisotropyExtension { + pub(crate) anisotropy_strength: Option, + pub(crate) anisotropy_rotation: Option, + #[cfg(feature = "pbr_anisotropy_texture")] + pub(crate) anisotropy_channel: UvChannel, + #[cfg(feature = "pbr_anisotropy_texture")] + pub(crate) anisotropy_texture: Option>, +} + +impl AnisotropyExtension { + #[expect( + clippy::allow_attributes, + reason = "`unused_variables` is not always linted" + )] + #[allow( + unused_variables, + reason = "Depending on what features are used to compile this crate, certain parameters may end up unused." + )] + pub(crate) fn parse( + load_context: &mut LoadContext, + document: &Document, + material: &Material, + ) -> Option { + let extension = material + .extensions()? + .get("KHR_materials_anisotropy")? + .as_object()?; + + #[cfg(feature = "pbr_anisotropy_texture")] + let (anisotropy_channel, anisotropy_texture) = extension + .get("anisotropyTexture") + .and_then(|value| value::from_value::(value.clone()).ok()) + .map(|json_info| { + ( + uv_channel(material, "anisotropy", json_info.tex_coord), + texture_handle_from_info(&json_info, document, load_context), + ) + }) + .unzip(); + + Some(AnisotropyExtension { + anisotropy_strength: extension.get("anisotropyStrength").and_then(Value::as_f64), + anisotropy_rotation: extension.get("anisotropyRotation").and_then(Value::as_f64), + #[cfg(feature = "pbr_anisotropy_texture")] + anisotropy_channel: anisotropy_channel.unwrap_or_default(), + #[cfg(feature = "pbr_anisotropy_texture")] + anisotropy_texture, + }) + } +} diff --git a/crates/bevy_gltf/src/loader/extensions/khr_materials_clearcoat.rs b/crates/bevy_gltf/src/loader/extensions/khr_materials_clearcoat.rs new file mode 100644 index 0000000000..5128487ca4 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/khr_materials_clearcoat.rs @@ -0,0 +1,104 @@ +use bevy_asset::LoadContext; + +use gltf::{Document, Material}; + +use serde_json::Value; + +#[cfg(feature = "pbr_multi_layer_material_textures")] +use { + crate::loader::gltf_ext::material::parse_material_extension_texture, bevy_asset::Handle, + bevy_image::Image, bevy_pbr::UvChannel, +}; + +/// Parsed data from the `KHR_materials_clearcoat` extension. +/// +/// See the specification: +/// +#[derive(Default)] +pub(crate) struct ClearcoatExtension { + pub(crate) clearcoat_factor: Option, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_channel: UvChannel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_texture: Option>, + pub(crate) clearcoat_roughness_factor: Option, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_roughness_channel: UvChannel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_roughness_texture: Option>, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_normal_channel: UvChannel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_normal_texture: Option>, +} + +impl ClearcoatExtension { + #[expect( + clippy::allow_attributes, + reason = "`unused_variables` is not always linted" + )] + #[allow( + unused_variables, + reason = "Depending on what features are used to compile this crate, certain parameters may end up unused." + )] + pub(crate) fn parse( + load_context: &mut LoadContext, + document: &Document, + material: &Material, + ) -> Option { + let extension = material + .extensions()? + .get("KHR_materials_clearcoat")? 
+ .as_object()?; + + #[cfg(feature = "pbr_multi_layer_material_textures")] + let (clearcoat_channel, clearcoat_texture) = parse_material_extension_texture( + material, + load_context, + document, + extension, + "clearcoatTexture", + "clearcoat", + ); + + #[cfg(feature = "pbr_multi_layer_material_textures")] + let (clearcoat_roughness_channel, clearcoat_roughness_texture) = + parse_material_extension_texture( + material, + load_context, + document, + extension, + "clearcoatRoughnessTexture", + "clearcoat roughness", + ); + + #[cfg(feature = "pbr_multi_layer_material_textures")] + let (clearcoat_normal_channel, clearcoat_normal_texture) = parse_material_extension_texture( + material, + load_context, + document, + extension, + "clearcoatNormalTexture", + "clearcoat normal", + ); + + Some(ClearcoatExtension { + clearcoat_factor: extension.get("clearcoatFactor").and_then(Value::as_f64), + clearcoat_roughness_factor: extension + .get("clearcoatRoughnessFactor") + .and_then(Value::as_f64), + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_channel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_texture, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_roughness_channel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_roughness_texture, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_normal_channel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_normal_texture, + }) + } +} diff --git a/crates/bevy_gltf/src/loader/extensions/khr_materials_specular.rs b/crates/bevy_gltf/src/loader/extensions/khr_materials_specular.rs new file mode 100644 index 0000000000..f0adcc4940 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/khr_materials_specular.rs @@ -0,0 +1,100 @@ +use bevy_asset::LoadContext; + +use gltf::{Document, Material}; + +use serde_json::Value; + +#[cfg(feature = "pbr_specular_textures")] +use { + crate::loader::gltf_ext::material::parse_material_extension_texture, bevy_asset::Handle, + bevy_image::Image, bevy_pbr::UvChannel, +}; + +/// Parsed data from the `KHR_materials_specular` extension. +/// +/// We currently don't parse `specularFactor` and `specularTexture`, since +/// they're incompatible with Filament. +/// +/// Note that the map is a *specular map*, not a *reflectance map*. In Bevy and +/// Filament terms, the reflectance values in the specular map range from [0.0, +/// 0.5], rather than [0.0, 1.0]. This is an unfortunate +/// `KHR_materials_specular` specification requirement that stems from the fact +/// that glTF is specified in terms of a specular strength model, not the +/// reflectance model that Filament and Bevy use. A workaround, which is noted +/// in the [`StandardMaterial`](bevy_pbr::StandardMaterial) documentation, is to set the reflectance value +/// to 2.0, which spreads the specular map range from [0.0, 1.0] as normal. 
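+///
+/// A sketch of that workaround, assuming the material is edited by hand and that
+/// `StandardMaterial::reflectance` is a plain scalar factor:
+///
+/// ```ignore
+/// let mut material = StandardMaterial::default();
+/// // Stretch the [0.0, 0.5] specular-map range back onto [0.0, 1.0].
+/// material.reflectance = 2.0;
+/// ```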
+/// +/// See the specification: +/// +#[derive(Default)] +pub(crate) struct SpecularExtension { + pub(crate) specular_factor: Option, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_channel: UvChannel, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_texture: Option>, + pub(crate) specular_color_factor: Option<[f64; 3]>, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_color_channel: UvChannel, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_color_texture: Option>, +} + +impl SpecularExtension { + pub(crate) fn parse( + _load_context: &mut LoadContext, + _document: &Document, + material: &Material, + ) -> Option { + let extension = material + .extensions()? + .get("KHR_materials_specular")? + .as_object()?; + + #[cfg(feature = "pbr_specular_textures")] + let (_specular_channel, _specular_texture) = parse_material_extension_texture( + material, + _load_context, + _document, + extension, + "specularTexture", + "specular", + ); + + #[cfg(feature = "pbr_specular_textures")] + let (_specular_color_channel, _specular_color_texture) = parse_material_extension_texture( + material, + _load_context, + _document, + extension, + "specularColorTexture", + "specular color", + ); + + Some(SpecularExtension { + specular_factor: extension.get("specularFactor").and_then(Value::as_f64), + #[cfg(feature = "pbr_specular_textures")] + specular_channel: _specular_channel, + #[cfg(feature = "pbr_specular_textures")] + specular_texture: _specular_texture, + specular_color_factor: extension + .get("specularColorFactor") + .and_then(Value::as_array) + .and_then(|json_array| { + if json_array.len() < 3 { + None + } else { + Some([ + json_array[0].as_f64()?, + json_array[1].as_f64()?, + json_array[2].as_f64()?, + ]) + } + }), + #[cfg(feature = "pbr_specular_textures")] + specular_color_channel: _specular_color_channel, + #[cfg(feature = "pbr_specular_textures")] + specular_color_texture: _specular_color_texture, + }) + } +} diff --git a/crates/bevy_gltf/src/loader/extensions/mod.rs b/crates/bevy_gltf/src/loader/extensions/mod.rs new file mode 100644 index 0000000000..14863fa453 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/mod.rs @@ -0,0 +1,10 @@ +//! glTF extensions defined by the Khronos Group and other vendors + +mod khr_materials_anisotropy; +mod khr_materials_clearcoat; +mod khr_materials_specular; + +pub(crate) use self::{ + khr_materials_anisotropy::AnisotropyExtension, khr_materials_clearcoat::ClearcoatExtension, + khr_materials_specular::SpecularExtension, +}; diff --git a/crates/bevy_gltf/src/loader/gltf_ext/material.rs b/crates/bevy_gltf/src/loader/gltf_ext/material.rs new file mode 100644 index 0000000000..9d8b7c5745 --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/material.rs @@ -0,0 +1,165 @@ +use bevy_math::Affine2; +use bevy_pbr::UvChannel; +use bevy_render::alpha::AlphaMode; + +use gltf::{json::texture::Info, Material}; + +use serde_json::value; + +use crate::GltfAssetLabel; + +use super::texture::texture_transform_to_affine2; + +#[cfg(any( + feature = "pbr_specular_textures", + feature = "pbr_multi_layer_material_textures" +))] +use { + super::texture::texture_handle_from_info, + bevy_asset::{Handle, LoadContext}, + bevy_image::Image, + gltf::Document, + serde_json::{Map, Value}, +}; + +/// Parses a texture that's part of a material extension block and returns its +/// UV channel and image reference. 
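+///
+/// A call sketch for a hypothetical `"fooTexture"` field of an extension object:
+///
+/// ```ignore
+/// let (channel, texture) = parse_material_extension_texture(
+///     material,
+///     load_context,
+///     document,
+///     extension,
+///     "fooTexture",
+///     "foo",
+/// );
+/// ```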
+#[cfg(any( + feature = "pbr_specular_textures", + feature = "pbr_multi_layer_material_textures" +))] +pub(crate) fn parse_material_extension_texture( + material: &Material, + load_context: &mut LoadContext, + document: &Document, + extension: &Map, + texture_name: &str, + texture_kind: &str, +) -> (UvChannel, Option>) { + match extension + .get(texture_name) + .and_then(|value| value::from_value::(value.clone()).ok()) + { + Some(json_info) => ( + uv_channel(material, texture_kind, json_info.tex_coord), + Some(texture_handle_from_info(&json_info, document, load_context)), + ), + None => (UvChannel::default(), None), + } +} + +pub(crate) fn uv_channel(material: &Material, texture_kind: &str, tex_coord: u32) -> UvChannel { + match tex_coord { + 0 => UvChannel::Uv0, + 1 => UvChannel::Uv1, + _ => { + let material_name = material + .name() + .map(|n| format!("the material \"{n}\"")) + .unwrap_or_else(|| "an unnamed material".to_string()); + let material_index = material + .index() + .map(|i| format!("index {i}")) + .unwrap_or_else(|| "default".to_string()); + tracing::warn!( + "Only 2 UV Channels are supported, but {material_name} ({material_index}) \ + has the TEXCOORD attribute {} on texture kind {texture_kind}, which will fallback to 0.", + tex_coord, + ); + UvChannel::Uv0 + } + } +} + +pub(crate) fn alpha_mode(material: &Material) -> AlphaMode { + match material.alpha_mode() { + gltf::material::AlphaMode::Opaque => AlphaMode::Opaque, + gltf::material::AlphaMode::Mask => AlphaMode::Mask(material.alpha_cutoff().unwrap_or(0.5)), + gltf::material::AlphaMode::Blend => AlphaMode::Blend, + } +} + +/// Returns the index (within the `textures` array) of the texture with the +/// given field name in the data for the material extension with the given name, +/// if there is one. +pub(crate) fn extension_texture_index( + material: &Material, + extension_name: &str, + texture_field_name: &str, +) -> Option { + Some( + value::from_value::( + material + .extensions()? + .get(extension_name)? + .as_object()? + .get(texture_field_name)? + .clone(), + ) + .ok()? + .index + .value(), + ) +} + +/// Returns true if the material needs mesh tangents in order to be successfully +/// rendered. +/// +/// We generate them if this function returns true. 
+pub(crate) fn needs_tangents(material: &Material) -> bool { + [ + material.normal_texture().is_some(), + #[cfg(feature = "pbr_multi_layer_material_textures")] + extension_texture_index( + material, + "KHR_materials_clearcoat", + "clearcoatNormalTexture", + ) + .is_some(), + ] + .into_iter() + .reduce(|a, b| a || b) + .unwrap_or(false) +} + +pub(crate) fn warn_on_differing_texture_transforms( + material: &Material, + info: &gltf::texture::Info, + texture_transform: Affine2, + texture_kind: &str, +) { + let has_differing_texture_transform = info + .texture_transform() + .map(texture_transform_to_affine2) + .is_some_and(|t| t != texture_transform); + if has_differing_texture_transform { + let material_name = material + .name() + .map(|n| format!("the material \"{n}\"")) + .unwrap_or_else(|| "an unnamed material".to_string()); + let texture_name = info + .texture() + .name() + .map(|n| format!("its {texture_kind} texture \"{n}\"")) + .unwrap_or_else(|| format!("its unnamed {texture_kind} texture")); + let material_index = material + .index() + .map(|i| format!("index {i}")) + .unwrap_or_else(|| "default".to_string()); + tracing::warn!( + "Only texture transforms on base color textures are supported, but {material_name} ({material_index}) \ + has a texture transform on {texture_name} (index {}), which will be ignored.", info.texture().index() + ); + } +} + +pub(crate) fn material_label(material: &Material, is_scale_inverted: bool) -> GltfAssetLabel { + if let Some(index) = material.index() { + GltfAssetLabel::Material { + index, + is_scale_inverted, + } + } else { + GltfAssetLabel::DefaultMaterial + } +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/mesh.rs b/crates/bevy_gltf/src/loader/gltf_ext/mesh.rs new file mode 100644 index 0000000000..ef719891a4 --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/mesh.rs @@ -0,0 +1,33 @@ +use bevy_mesh::PrimitiveTopology; + +use gltf::mesh::{Mesh, Mode, Primitive}; + +use crate::GltfError; + +pub(crate) fn primitive_name(mesh: &Mesh<'_>, primitive: &Primitive) -> String { + let mesh_name = mesh.name().unwrap_or("Mesh"); + if mesh.primitives().len() > 1 { + format!("{}.{}", mesh_name, primitive.index()) + } else { + mesh_name.to_string() + } +} + +/// Maps the `primitive_topology` from glTF to `wgpu`. +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "`GltfError` is only barely past the threshold for large errors." + ) +)] +pub(crate) fn primitive_topology(mode: Mode) -> Result { + match mode { + Mode::Points => Ok(PrimitiveTopology::PointList), + Mode::Lines => Ok(PrimitiveTopology::LineList), + Mode::LineStrip => Ok(PrimitiveTopology::LineStrip), + Mode::Triangles => Ok(PrimitiveTopology::TriangleList), + Mode::TriangleStrip => Ok(PrimitiveTopology::TriangleStrip), + mode => Err(GltfError::UnsupportedPrimitive { mode }), + } +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/mod.rs b/crates/bevy_gltf/src/loader/gltf_ext/mod.rs new file mode 100644 index 0000000000..6036948d9c --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/mod.rs @@ -0,0 +1,82 @@ +//! 
Methods to access information from [`gltf`] types + +pub mod material; +pub mod mesh; +pub mod scene; +pub mod texture; + +use bevy_platform::collections::HashSet; + +use fixedbitset::FixedBitSet; +use gltf::{Document, Gltf}; + +use super::GltfError; + +use self::{material::extension_texture_index, scene::check_is_part_of_cycle}; + +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "need to be signature compatible with `load_gltf`" + ) +)] +/// Checks all glTF nodes for cycles, starting at the scene root. +pub(crate) fn check_for_cycles(gltf: &Gltf) -> Result<(), GltfError> { + // Initialize with the scene roots. + let mut roots = FixedBitSet::with_capacity(gltf.nodes().len()); + for root in gltf.scenes().flat_map(|scene| scene.nodes()) { + roots.insert(root.index()); + } + + // Check each one. + let mut visited = FixedBitSet::with_capacity(gltf.nodes().len()); + for root in roots.ones() { + let Some(node) = gltf.nodes().nth(root) else { + unreachable!("Index of a root node should always exist."); + }; + check_is_part_of_cycle(&node, &mut visited)?; + } + + Ok(()) +} + +pub(crate) fn get_linear_textures(document: &Document) -> HashSet { + let mut linear_textures = HashSet::default(); + + for material in document.materials() { + if let Some(texture) = material.normal_texture() { + linear_textures.insert(texture.texture().index()); + } + if let Some(texture) = material.occlusion_texture() { + linear_textures.insert(texture.texture().index()); + } + if let Some(texture) = material + .pbr_metallic_roughness() + .metallic_roughness_texture() + { + linear_textures.insert(texture.texture().index()); + } + if let Some(texture_index) = + extension_texture_index(&material, "KHR_materials_anisotropy", "anisotropyTexture") + { + linear_textures.insert(texture_index); + } + + // None of the clearcoat maps should be loaded as sRGB. + #[cfg(feature = "pbr_multi_layer_material_textures")] + for texture_field_name in [ + "clearcoatTexture", + "clearcoatRoughnessTexture", + "clearcoatNormalTexture", + ] { + if let Some(texture_index) = + extension_texture_index(&material, "KHR_materials_clearcoat", texture_field_name) + { + linear_textures.insert(texture_index); + } + } + } + + linear_textures +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/scene.rs b/crates/bevy_gltf/src/loader/gltf_ext/scene.rs new file mode 100644 index 0000000000..83e6778b99 --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/scene.rs @@ -0,0 +1,94 @@ +use bevy_ecs::name::Name; +use bevy_math::{Mat4, Vec3}; +use bevy_transform::components::Transform; + +use gltf::scene::Node; + +use fixedbitset::FixedBitSet; +use itertools::Itertools; + +#[cfg(feature = "bevy_animation")] +use bevy_platform::collections::{HashMap, HashSet}; + +use crate::GltfError; + +pub(crate) fn node_name(node: &Node) -> Name { + let name = node + .name() + .map(ToString::to_string) + .unwrap_or_else(|| format!("GltfNode{}", node.index())); + Name::new(name) +} + +/// Calculate the transform of gLTF [`Node`]. +/// +/// This should be used instead of calling [`gltf::scene::Transform::matrix()`] +/// on [`Node::transform()`](gltf::Node::transform) directly because it uses optimized glam types and +/// if `libm` feature of `bevy_math` crate is enabled also handles cross +/// platform determinism properly. 
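+///
+/// In other words, prefer this (a sketch):
+///
+/// ```ignore
+/// let transform = node_transform(&node);
+/// ```
+///
+/// over converting the raw column-major matrix yourself:
+///
+/// ```ignore
+/// let transform = Transform::from_matrix(Mat4::from_cols_array_2d(&node.transform().matrix()));
+/// ```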
+pub(crate) fn node_transform(node: &Node) -> Transform { + match node.transform() { + gltf::scene::Transform::Matrix { matrix } => { + Transform::from_matrix(Mat4::from_cols_array_2d(&matrix)) + } + gltf::scene::Transform::Decomposed { + translation, + rotation, + scale, + } => Transform { + translation: Vec3::from(translation), + rotation: bevy_math::Quat::from_array(rotation), + scale: Vec3::from(scale), + }, + } +} + +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "need to be signature compatible with `load_gltf`" + ) +)] +/// Check if [`Node`] is part of cycle +pub(crate) fn check_is_part_of_cycle( + node: &Node, + visited: &mut FixedBitSet, +) -> Result<(), GltfError> { + // Do we have a cycle? + if visited.contains(node.index()) { + return Err(GltfError::CircularChildren(format!( + "glTF nodes form a cycle: {} -> {}", + visited.ones().map(|bit| bit.to_string()).join(" -> "), + node.index() + ))); + } + + // Recurse. + visited.insert(node.index()); + for kid in node.children() { + check_is_part_of_cycle(&kid, visited)?; + } + visited.remove(node.index()); + + Ok(()) +} + +#[cfg(feature = "bevy_animation")] +pub(crate) fn collect_path( + node: &Node, + current_path: &[Name], + paths: &mut HashMap)>, + root_index: usize, + visited: &mut HashSet, +) { + let mut path = current_path.to_owned(); + path.push(node_name(node)); + visited.insert(node.index()); + for child in node.children() { + if !visited.contains(&child.index()) { + collect_path(&child, &path, paths, root_index, visited); + } + } + paths.insert(node.index(), (root_index, path)); +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/texture.rs b/crates/bevy_gltf/src/loader/gltf_ext/texture.rs new file mode 100644 index 0000000000..5fb5bcce0d --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/texture.rs @@ -0,0 +1,126 @@ +use bevy_asset::{Handle, LoadContext}; +use bevy_image::{Image, ImageAddressMode, ImageFilterMode, ImageSamplerDescriptor}; +use bevy_math::Affine2; + +use gltf::{ + image::Source, + texture::{MagFilter, MinFilter, Texture, TextureTransform, WrappingMode}, +}; + +#[cfg(any( + feature = "pbr_anisotropy_texture", + feature = "pbr_multi_layer_material_textures", + feature = "pbr_specular_textures" +))] +use gltf::{json::texture::Info, Document}; + +use crate::{loader::DataUri, GltfAssetLabel}; + +pub(crate) fn texture_handle( + texture: &Texture<'_>, + load_context: &mut LoadContext, +) -> Handle { + match texture.source().source() { + Source::View { .. } => load_context.get_label_handle(texture_label(texture).to_string()), + Source::Uri { uri, .. } => { + let uri = percent_encoding::percent_decode_str(uri) + .decode_utf8() + .unwrap(); + let uri = uri.as_ref(); + if let Ok(_data_uri) = DataUri::parse(uri) { + load_context.get_label_handle(texture_label(texture).to_string()) + } else { + let parent = load_context.path().parent().unwrap(); + let image_path = parent.join(uri); + load_context.load(image_path) + } + } + } +} + +/// Extracts the texture sampler data from the glTF [`Texture`]. 
+pub(crate) fn texture_sampler(texture: &Texture<'_>) -> ImageSamplerDescriptor { + let gltf_sampler = texture.sampler(); + + ImageSamplerDescriptor { + address_mode_u: address_mode(&gltf_sampler.wrap_s()), + address_mode_v: address_mode(&gltf_sampler.wrap_t()), + + mag_filter: gltf_sampler + .mag_filter() + .map(|mf| match mf { + MagFilter::Nearest => ImageFilterMode::Nearest, + MagFilter::Linear => ImageFilterMode::Linear, + }) + .unwrap_or(ImageSamplerDescriptor::default().mag_filter), + + min_filter: gltf_sampler + .min_filter() + .map(|mf| match mf { + MinFilter::Nearest + | MinFilter::NearestMipmapNearest + | MinFilter::NearestMipmapLinear => ImageFilterMode::Nearest, + MinFilter::Linear + | MinFilter::LinearMipmapNearest + | MinFilter::LinearMipmapLinear => ImageFilterMode::Linear, + }) + .unwrap_or(ImageSamplerDescriptor::default().min_filter), + + mipmap_filter: gltf_sampler + .min_filter() + .map(|mf| match mf { + MinFilter::Nearest + | MinFilter::Linear + | MinFilter::NearestMipmapNearest + | MinFilter::LinearMipmapNearest => ImageFilterMode::Nearest, + MinFilter::NearestMipmapLinear | MinFilter::LinearMipmapLinear => { + ImageFilterMode::Linear + } + }) + .unwrap_or(ImageSamplerDescriptor::default().mipmap_filter), + + ..Default::default() + } +} + +pub(crate) fn texture_label(texture: &Texture<'_>) -> GltfAssetLabel { + GltfAssetLabel::Texture(texture.index()) +} + +pub(crate) fn address_mode(wrapping_mode: &WrappingMode) -> ImageAddressMode { + match wrapping_mode { + WrappingMode::ClampToEdge => ImageAddressMode::ClampToEdge, + WrappingMode::Repeat => ImageAddressMode::Repeat, + WrappingMode::MirroredRepeat => ImageAddressMode::MirrorRepeat, + } +} + +pub(crate) fn texture_transform_to_affine2(texture_transform: TextureTransform) -> Affine2 { + Affine2::from_scale_angle_translation( + texture_transform.scale().into(), + -texture_transform.rotation(), + texture_transform.offset().into(), + ) +} + +#[cfg(any( + feature = "pbr_anisotropy_texture", + feature = "pbr_multi_layer_material_textures", + feature = "pbr_specular_textures" +))] +/// Given a [`Info`], returns the handle of the texture that this +/// refers to. +/// +/// This is a low-level function only used when the [`gltf`] crate has no support +/// for an extension, forcing us to parse its texture references manually. 
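+///
+/// A sketch of that manual path, for a hypothetical `"fooTexture"` extension field:
+///
+/// ```ignore
+/// let info: Info = serde_json::from_value(extension.get("fooTexture")?.clone()).ok()?;
+/// let handle = texture_handle_from_info(&info, document, load_context);
+/// ```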
+pub(crate) fn texture_handle_from_info( + info: &Info, + document: &Document, + load_context: &mut LoadContext, +) -> Handle { + let texture = document + .textures() + .nth(info.index.value()) + .expect("Texture info references a nonexistent texture"); + texture_handle(&texture, load_context) +} diff --git a/crates/bevy_gltf/src/loader.rs b/crates/bevy_gltf/src/loader/mod.rs similarity index 72% rename from crates/bevy_gltf/src/loader.rs rename to crates/bevy_gltf/src/loader/mod.rs index 1a8e170ee7..a4e25475b7 100644 --- a/crates/bevy_gltf/src/loader.rs +++ b/crates/bevy_gltf/src/loader/mod.rs @@ -1,73 +1,86 @@ -use crate::{ - vertex_attributes::convert_attribute, Gltf, GltfAssetLabel, GltfExtras, GltfMaterialExtras, - GltfMaterialName, GltfMeshExtras, GltfNode, GltfSceneExtras, GltfSkin, +mod extensions; +mod gltf_ext; + +use std::{ + io::Error, + path::{Path, PathBuf}, }; +#[cfg(feature = "bevy_animation")] +use bevy_animation::{prelude::*, AnimationTarget, AnimationTargetId}; use bevy_asset::{ io::Reader, AssetLoadError, AssetLoader, Handle, LoadContext, ReadAssetBytesError, + RenderAssetUsages, }; use bevy_color::{Color, LinearRgba}; use bevy_core_pipeline::prelude::Camera3d; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, Entity}, + entity::{Entity, EntityHashMap}, hierarchy::ChildSpawner, name::Name, world::World, }; use bevy_image::{ - CompressedImageFormats, Image, ImageAddressMode, ImageFilterMode, ImageLoaderSettings, - ImageSampler, ImageSamplerDescriptor, ImageType, TextureError, + CompressedImageFormats, Image, ImageLoaderSettings, ImageSampler, ImageSamplerDescriptor, + ImageType, TextureError, }; -use bevy_math::{Affine2, Mat4, Vec3}; +use bevy_math::{Mat4, Vec3}; +use bevy_mesh::{ + morph::{MeshMorphWeights, MorphAttributes, MorphTargetImage, MorphWeights}, + skinning::{SkinnedMesh, SkinnedMeshInverseBindposes}, + Indices, Mesh, MeshVertexAttribute, PrimitiveTopology, VertexAttributeValues, +}; +#[cfg(feature = "pbr_transmission_textures")] +use bevy_pbr::UvChannel; use bevy_pbr::{ - DirectionalLight, MeshMaterial3d, PointLight, SpotLight, StandardMaterial, UvChannel, - MAX_JOINTS, + DirectionalLight, MeshMaterial3d, PointLight, SpotLight, StandardMaterial, MAX_JOINTS, }; -use bevy_platform_support::collections::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ - alpha::AlphaMode, camera::{Camera, OrthographicProjection, PerspectiveProjection, Projection, ScalingMode}, - mesh::{ - morph::{MeshMorphWeights, MorphAttributes, MorphTargetImage, MorphWeights}, - skinning::{SkinnedMesh, SkinnedMeshInverseBindposes}, - Indices, Mesh, Mesh3d, MeshVertexAttribute, VertexAttributeValues, - }, + mesh::Mesh3d, primitives::Aabb, - render_asset::RenderAssetUsages, - render_resource::{Face, PrimitiveTopology}, + render_resource::Face, view::Visibility, }; use bevy_scene::Scene; #[cfg(not(target_arch = "wasm32"))] use bevy_tasks::IoTaskPool; use bevy_transform::components::Transform; -use fixedbitset::FixedBitSet; + use gltf::{ accessor::Iter, image::Source, - json, mesh::{util::ReadIndices, Mode}, - texture::{Info, MagFilter, MinFilter, TextureTransform, WrappingMode}, - Document, Material, Node, Primitive, Semantic, + Document, Material, Node, Semantic, }; -use itertools::Itertools; + use serde::{Deserialize, Serialize}; -#[cfg(any( - feature = "pbr_specular_textures", - feature = "pbr_multi_layer_material_textures" -))] -use serde_json::Map; -use serde_json::{value, Value}; -use std::{ - io::Error, - path::{Path, PathBuf}, -}; 
+#[cfg(feature = "bevy_animation")] +use smallvec::SmallVec; + use thiserror::Error; use tracing::{error, info_span, warn}; + +use crate::{ + vertex_attributes::convert_attribute, Gltf, GltfAssetLabel, GltfExtras, GltfMaterialExtras, + GltfMaterialName, GltfMeshExtras, GltfNode, GltfSceneExtras, GltfSkin, +}; + #[cfg(feature = "bevy_animation")] -use { - bevy_animation::{prelude::*, AnimationTarget, AnimationTargetId}, - smallvec::SmallVec, +use self::gltf_ext::scene::collect_path; +use self::{ + extensions::{AnisotropyExtension, ClearcoatExtension, SpecularExtension}, + gltf_ext::{ + check_for_cycles, get_linear_textures, + material::{ + alpha_mode, material_label, needs_tangents, uv_channel, + warn_on_differing_texture_transforms, + }, + mesh::{primitive_name, primitive_topology}, + scene::{node_name, node_transform}, + texture::{texture_handle, texture_sampler, texture_transform_to_affine2}, + }, }; /// An error that occurs when loading a glTF file. @@ -110,10 +123,10 @@ pub enum GltfError { MissingAnimationSampler(usize), /// Failed to generate tangents. #[error("failed to generate tangents: {0}")] - GenerateTangentsError(#[from] bevy_render::mesh::GenerateTangentsError), + GenerateTangentsError(#[from] bevy_mesh::GenerateTangentsError), /// Failed to generate morph targets. #[error("failed to generate morph targets: {0}")] - MorphTarget(#[from] bevy_render::mesh::morph::MorphBuildError), + MorphTarget(#[from] bevy_mesh::morph::MorphBuildError), /// Circular children in Nodes #[error("GLTF model must be a tree, found cycle instead at node indices: {0:?}")] #[from(ignore)] @@ -194,6 +207,7 @@ impl AssetLoader for GltfLoader { ) -> Result { let mut bytes = Vec::new(); reader.read_to_end(&mut bytes).await?; + load_gltf(self, &bytes, load_context, settings).await } @@ -210,6 +224,7 @@ async fn load_gltf<'a, 'b, 'c>( settings: &'b GltfLoaderSettings, ) -> Result { let gltf = gltf::Gltf::from_slice(bytes)?; + let file_name = load_context .asset_path() .path() @@ -221,45 +236,7 @@ async fn load_gltf<'a, 'b, 'c>( .to_string(); let buffer_data = load_buffers(&gltf, load_context).await?; - let mut linear_textures = >::default(); - - for material in gltf.materials() { - if let Some(texture) = material.normal_texture() { - linear_textures.insert(texture.texture().index()); - } - if let Some(texture) = material.occlusion_texture() { - linear_textures.insert(texture.texture().index()); - } - if let Some(texture) = material - .pbr_metallic_roughness() - .metallic_roughness_texture() - { - linear_textures.insert(texture.texture().index()); - } - if let Some(texture_index) = material_extension_texture_index( - &material, - "KHR_materials_anisotropy", - "anisotropyTexture", - ) { - linear_textures.insert(texture_index); - } - - // None of the clearcoat maps should be loaded as sRGB. 
- #[cfg(feature = "pbr_multi_layer_material_textures")] - for texture_field_name in [ - "clearcoatTexture", - "clearcoatRoughnessTexture", - "clearcoatNormalTexture", - ] { - if let Some(texture_index) = material_extension_texture_index( - &material, - "KHR_materials_clearcoat", - texture_field_name, - ) { - linear_textures.insert(texture_index); - } - } - } + let linear_textures = get_linear_textures(&gltf.document); #[cfg(feature = "bevy_animation")] let paths = { @@ -267,7 +244,7 @@ async fn load_gltf<'a, 'b, 'c>( for scene in gltf.scenes() { for node in scene.nodes() { let root_index = node.index(); - paths_recur(node, &[], &mut paths, root_index, &mut HashSet::default()); + collect_path(&node, &[], &mut paths, root_index, &mut HashSet::default()); } } paths @@ -529,35 +506,6 @@ async fn load_gltf<'a, 'b, 'c>( (animations, named_animations, animation_roots) }; - // TODO: use the threaded impl on wasm once wasm thread pool doesn't deadlock on it - // See https://github.com/bevyengine/bevy/issues/1924 for more details - // The taskpool use is also avoided when there is only one texture for performance reasons and - // to avoid https://github.com/bevyengine/bevy/pull/2725 - // PERF: could this be a Vec instead? Are gltf texture indices dense? - fn process_loaded_texture( - load_context: &mut LoadContext, - handles: &mut Vec>, - texture: ImageOrPath, - ) { - let handle = match texture { - ImageOrPath::Image { label, image } => { - load_context.add_labeled_asset(label.to_string(), image) - } - ImageOrPath::Path { - path, - is_srgb, - sampler_descriptor, - } => load_context - .loader() - .with_settings(move |settings: &mut ImageLoaderSettings| { - settings.is_srgb = is_srgb; - settings.sampler = ImageSampler::Descriptor(sampler_descriptor.clone()); - }) - .load(path), - }; - handles.push(handle); - } - // We collect handles to ensure loaded images from paths are not unloaded before they are used elsewhere // in the loader. This prevents "reloads", but it also prevents dropping the is_srgb context on reload. // @@ -577,7 +525,7 @@ async fn load_gltf<'a, 'b, 'c>( settings.load_materials, ) .await?; - process_loaded_texture(load_context, &mut _texture_handles, image); + image.process_loaded_texture(load_context, &mut _texture_handles); } } else { #[cfg(not(target_arch = "wasm32"))] @@ -603,7 +551,7 @@ async fn load_gltf<'a, 'b, 'c>( .into_iter() .for_each(|result| match result { Ok(image) => { - process_loaded_texture(load_context, &mut _texture_handles, image); + image.process_loaded_texture(load_context, &mut _texture_handles); } Err(err) => { warn!("Error loading glTF texture: {}", err); @@ -644,7 +592,7 @@ async fn load_gltf<'a, 'b, 'c>( mesh: gltf_mesh.index(), primitive: primitive.index(), }; - let primitive_topology = get_primitive_topology(primitive.mode())?; + let primitive_topology = primitive_topology(primitive.mode())?; let mut mesh = Mesh::new(primitive_topology, settings.load_meshes); @@ -731,7 +679,7 @@ async fn load_gltf<'a, 'b, 'c>( { mesh.insert_attribute(Mesh::ATTRIBUTE_TANGENT, vertex_attribute); } else if mesh.attribute(Mesh::ATTRIBUTE_NORMAL).is_some() - && material_needs_tangents(&primitive.material()) + && needs_tangents(&primitive.material()) { tracing::debug!( "Missing vertex tangents for {}, computing them using the mikktspace algorithm. 
Consider using a tool such as Blender to pre-compute the tangents.", file_name @@ -758,13 +706,20 @@ async fn load_gltf<'a, 'b, 'c>( .material() .index() .and_then(|i| materials.get(i).cloned()), - get_gltf_extras(primitive.extras()), - get_gltf_extras(primitive.material().extras()), + primitive.extras().as_deref().map(GltfExtras::from), + primitive + .material() + .extras() + .as_deref() + .map(GltfExtras::from), )); } - let mesh = - super::GltfMesh::new(&gltf_mesh, primitives, get_gltf_extras(gltf_mesh.extras())); + let mesh = super::GltfMesh::new( + &gltf_mesh, + primitives, + gltf_mesh.extras().as_deref().map(GltfExtras::from), + ); let handle = load_context.add_labeled_asset(mesh.asset_label().to_string(), mesh); if let Some(name) = gltf_mesh.name() { @@ -779,12 +734,13 @@ async fn load_gltf<'a, 'b, 'c>( let reader = gltf_skin.reader(|buffer| Some(&buffer_data[buffer.index()])); let local_to_bone_bind_matrices: Vec = reader .read_inverse_bind_matrices() - .unwrap() - .map(|mat| Mat4::from_cols_array_2d(&mat)) - .collect(); + .map(|mats| mats.map(|mat| Mat4::from_cols_array_2d(&mat)).collect()) + .unwrap_or_else(|| { + core::iter::repeat_n(Mat4::IDENTITY, gltf_skin.joints().len()).collect() + }); load_context.add_labeled_asset( - inverse_bind_matrices_label(&gltf_skin), + GltfAssetLabel::InverseBindMatrices(gltf_skin.index()).to_string(), SkinnedMeshInverseBindposes::from(local_to_bone_bind_matrices), ) }) @@ -792,7 +748,7 @@ async fn load_gltf<'a, 'b, 'c>( let mut nodes = HashMap::>::default(); let mut named_nodes = >::default(); - let mut skins = vec![]; + let mut skins = >::default(); let mut named_skins = >::default(); // First, create the node handles. @@ -803,42 +759,47 @@ async fn load_gltf<'a, 'b, 'c>( } // Then check for cycles. - check_gltf_for_cycles(&gltf)?; + check_for_cycles(&gltf)?; // Now populate the nodes. 
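The skinning hunk above stops calling `unwrap()` on `read_inverse_bind_matrices()` and instead falls back to one identity matrix per joint, which is what the glTF specification prescribes when the accessor is omitted. A minimal sketch of that fallback using plain `glam`; `read_matrices` is a hypothetical stand-in for the gltf reader call:

```rust
use glam::Mat4;

/// Stand-in for `reader.read_inverse_bind_matrices()`: `None` means the skin
/// omitted the accessor, which the spec treats as "all identity".
fn read_matrices() -> Option<Vec<[[f32; 4]; 4]>> {
    None
}

fn inverse_bind_matrices(joint_count: usize) -> Vec<Mat4> {
    read_matrices()
        // Convert each raw column-major 4x4 array into a Mat4.
        .map(|mats| mats.iter().map(Mat4::from_cols_array_2d).collect())
        // Fall back to one identity matrix per joint instead of panicking.
        .unwrap_or_else(|| core::iter::repeat_n(Mat4::IDENTITY, joint_count).collect())
}

fn main() {
    assert!(inverse_bind_matrices(3).iter().all(|m| *m == Mat4::IDENTITY));
}
```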
for node in gltf.nodes() { let skin = node.skin().map(|skin| { - let joints: Vec<_> = skin - .joints() - .map(|joint| nodes.get(&joint.index()).unwrap().clone()) - .collect(); + skins + .entry(skin.index()) + .or_insert_with(|| { + let joints: Vec<_> = skin + .joints() + .map(|joint| nodes.get(&joint.index()).unwrap().clone()) + .collect(); - if joints.len() > MAX_JOINTS { - warn!( - "The glTF skin {} has {} joints, but the maximum supported is {}", - skin.name() - .map(ToString::to_string) - .unwrap_or_else(|| skin.index().to_string()), - joints.len(), - MAX_JOINTS - ); - } + if joints.len() > MAX_JOINTS { + warn!( + "The glTF skin {} has {} joints, but the maximum supported is {}", + skin.name() + .map(ToString::to_string) + .unwrap_or_else(|| skin.index().to_string()), + joints.len(), + MAX_JOINTS + ); + } - let gltf_skin = GltfSkin::new( - &skin, - joints, - skinned_mesh_inverse_bindposes[skin.index()].clone(), - get_gltf_extras(skin.extras()), - ); + let gltf_skin = GltfSkin::new( + &skin, + joints, + skinned_mesh_inverse_bindposes[skin.index()].clone(), + skin.extras().as_deref().map(GltfExtras::from), + ); - let handle = load_context.add_labeled_asset(skin_label(&skin), gltf_skin); + let handle = load_context + .add_labeled_asset(gltf_skin.asset_label().to_string(), gltf_skin); - skins.push(handle.clone()); - if let Some(name) = skin.name() { - named_skins.insert(name.into(), handle.clone()); - } + if let Some(name) = skin.name() { + named_skins.insert(name.into(), handle.clone()); + } - handle + handle + }) + .clone() }); let children = node @@ -857,7 +818,7 @@ async fn load_gltf<'a, 'b, 'c>( mesh, node_transform(&node), skin, - get_gltf_extras(node.extras()), + node.extras().as_deref().map(GltfExtras::from), ); #[cfg(feature = "bevy_animation")] @@ -952,7 +913,10 @@ async fn load_gltf<'a, 'b, 'c>( }); } let loaded_scene = scene_load_context.finish(Scene::new(world)); - let scene_handle = load_context.add_loaded_labeled_asset(scene_label(&scene), loaded_scene); + let scene_handle = load_context.add_loaded_labeled_asset( + GltfAssetLabel::Scene(scene.index()).to_string(), + loaded_scene, + ); if let Some(name) = scene.name() { named_scenes.insert(name.into(), scene_handle.clone()); @@ -969,7 +933,7 @@ async fn load_gltf<'a, 'b, 'c>( named_scenes, meshes, named_meshes, - skins, + skins: skins.into_values().collect(), named_skins, materials, named_materials, @@ -987,62 +951,6 @@ async fn load_gltf<'a, 'b, 'c>( }) } -fn get_gltf_extras(extras: &json::Extras) -> Option { - extras.as_ref().map(|extras| GltfExtras { - value: extras.get().to_string(), - }) -} - -/// Calculate the transform of gLTF node. -/// -/// This should be used instead of calling [`gltf::scene::Transform::matrix()`] -/// on [`Node::transform()`] directly because it uses optimized glam types and -/// if `libm` feature of `bevy_math` crate is enabled also handles cross -/// platform determinism properly. 
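The rewritten node loop above keys skins by their glTF index with `entry().or_insert_with()`, so a skin referenced by several nodes is built and labeled only once. A small standalone sketch of that caching pattern; the `String` value and the `builds` counter are illustrative stand-ins for the real labeled-asset work:

```rust
use std::collections::HashMap;

fn main() {
    // Three nodes referencing two distinct skins (glTF skin indices 0, 1, 0).
    let node_skin_indices = [0usize, 1, 0];

    let mut skins: HashMap<usize, String> = HashMap::new();
    let mut builds = 0;

    for index in node_skin_indices {
        // The builder closure only runs the first time an index is seen,
        // so a shared skin is constructed once and cloned afterwards.
        let _handle = skins
            .entry(index)
            .or_insert_with(|| {
                builds += 1;
                format!("Skin{index}")
            })
            .clone();
    }

    assert_eq!(builds, 2);
    assert_eq!(skins.len(), 2);
}
```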
-fn node_transform(node: &Node) -> Transform { - match node.transform() { - gltf::scene::Transform::Matrix { matrix } => { - Transform::from_matrix(Mat4::from_cols_array_2d(&matrix)) - } - gltf::scene::Transform::Decomposed { - translation, - rotation, - scale, - } => Transform { - translation: Vec3::from(translation), - rotation: bevy_math::Quat::from_array(rotation), - scale: Vec3::from(scale), - }, - } -} - -fn node_name(node: &Node) -> Name { - let name = node - .name() - .map(ToString::to_string) - .unwrap_or_else(|| format!("GltfNode{}", node.index())); - Name::new(name) -} - -#[cfg(feature = "bevy_animation")] -fn paths_recur( - node: Node, - current_path: &[Name], - paths: &mut HashMap)>, - root_index: usize, - visited: &mut HashSet, -) { - let mut path = current_path.to_owned(); - path.push(node_name(&node)); - visited.insert(node.index()); - for child in node.children() { - if !visited.contains(&child.index()) { - paths_recur(child, &path, paths, root_index, visited); - } - } - paths.insert(node.index(), (root_index, path)); -} - /// Loads a glTF texture as a bevy [`Image`] and returns it together with its label. async fn load_image<'a, 'b>( gltf_texture: gltf::Texture<'a>, @@ -1054,18 +962,13 @@ async fn load_image<'a, 'b>( ) -> Result { let is_srgb = !linear_textures.contains(&gltf_texture.index()); let sampler_descriptor = texture_sampler(&gltf_texture); - #[cfg(all(debug_assertions, feature = "dds"))] - let name = gltf_texture - .name() - .map_or("Unknown GLTF Texture".to_string(), ToString::to_string); + match gltf_texture.source().source() { Source::View { view, mime_type } => { let start = view.offset(); let end = view.offset() + view.length(); let buffer = &buffer_data[view.buffer().index()][start..end]; let image = Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - name, buffer, ImageType::MimeType(mime_type), supported_compressed_formats, @@ -1088,8 +991,6 @@ async fn load_image<'a, 'b>( let image_type = ImageType::MimeType(data_uri.mime_type); Ok(ImageOrPath::Image { image: Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - name, &bytes, mime_type.map(ImageType::MimeType).unwrap_or(image_type), supported_compressed_formats, @@ -1119,40 +1020,37 @@ fn load_material( is_scale_inverted: bool, ) -> Handle { let material_label = material_label(material, is_scale_inverted); - load_context.labeled_asset_scope(material_label, |load_context| { + load_context.labeled_asset_scope(material_label.to_string(), |load_context| { let pbr = material.pbr_metallic_roughness(); // TODO: handle missing label handle errors here? 
let color = pbr.base_color_factor(); let base_color_channel = pbr .base_color_texture() - .map(|info| get_uv_channel(material, "base color", info.tex_coord())) + .map(|info| uv_channel(material, "base color", info.tex_coord())) .unwrap_or_default(); let base_color_texture = pbr .base_color_texture() - .map(|info| texture_handle(load_context, &info.texture())); + .map(|info| texture_handle(&info.texture(), load_context)); let uv_transform = pbr .base_color_texture() - .and_then(|info| { - info.texture_transform() - .map(convert_texture_transform_to_affine2) - }) + .and_then(|info| info.texture_transform().map(texture_transform_to_affine2)) .unwrap_or_default(); let normal_map_channel = material .normal_texture() - .map(|info| get_uv_channel(material, "normal map", info.tex_coord())) + .map(|info| uv_channel(material, "normal map", info.tex_coord())) .unwrap_or_default(); let normal_map_texture: Option> = material.normal_texture().map(|normal_texture| { // TODO: handle normal_texture.scale - texture_handle(load_context, &normal_texture.texture()) + texture_handle(&normal_texture.texture(), load_context) }); let metallic_roughness_channel = pbr .metallic_roughness_texture() - .map(|info| get_uv_channel(material, "metallic/roughness", info.tex_coord())) + .map(|info| uv_channel(material, "metallic/roughness", info.tex_coord())) .unwrap_or_default(); let metallic_roughness_texture = pbr.metallic_roughness_texture().map(|info| { warn_on_differing_texture_transforms( @@ -1161,27 +1059,27 @@ fn load_material( uv_transform, "metallic/roughness", ); - texture_handle(load_context, &info.texture()) + texture_handle(&info.texture(), load_context) }); let occlusion_channel = material .occlusion_texture() - .map(|info| get_uv_channel(material, "occlusion", info.tex_coord())) + .map(|info| uv_channel(material, "occlusion", info.tex_coord())) .unwrap_or_default(); let occlusion_texture = material.occlusion_texture().map(|occlusion_texture| { // TODO: handle occlusion_texture.strength() (a scalar multiplier for occlusion strength) - texture_handle(load_context, &occlusion_texture.texture()) + texture_handle(&occlusion_texture.texture(), load_context) }); let emissive = material.emissive_factor(); let emissive_channel = material .emissive_texture() - .map(|info| get_uv_channel(material, "emissive", info.tex_coord())) + .map(|info| uv_channel(material, "emissive", info.tex_coord())) .unwrap_or_default(); let emissive_texture = material.emissive_texture().map(|info| { // TODO: handle occlusion_texture.strength() (a scalar multiplier for occlusion strength) warn_on_differing_texture_transforms(material, &info, uv_transform, "emissive"); - texture_handle(load_context, &info.texture()) + texture_handle(&info.texture(), load_context) }); #[cfg(feature = "pbr_transmission_textures")] @@ -1191,14 +1089,12 @@ fn load_material( .map_or((0.0, UvChannel::Uv0, None), |transmission| { let specular_transmission_channel = transmission .transmission_texture() - .map(|info| { - get_uv_channel(material, "specular/transmission", info.tex_coord()) - }) + .map(|info| uv_channel(material, "specular/transmission", info.tex_coord())) .unwrap_or_default(); let transmission_texture: Option> = transmission .transmission_texture() .map(|transmission_texture| { - texture_handle(load_context, &transmission_texture.texture()) + texture_handle(&transmission_texture.texture(), load_context) }); ( @@ -1225,11 +1121,11 @@ fn load_material( |volume| { let thickness_channel = volume .thickness_texture() - .map(|info| get_uv_channel(material, 
"thickness", info.tex_coord())) + .map(|info| uv_channel(material, "thickness", info.tex_coord())) .unwrap_or_default(); let thickness_texture: Option> = volume.thickness_texture().map(|thickness_texture| { - texture_handle(load_context, &thickness_texture.texture()) + texture_handle(&thickness_texture.texture(), load_context) }); ( @@ -1356,72 +1252,13 @@ fn load_material( }) } -fn get_uv_channel(material: &Material, texture_kind: &str, tex_coord: u32) -> UvChannel { - match tex_coord { - 0 => UvChannel::Uv0, - 1 => UvChannel::Uv1, - _ => { - let material_name = material - .name() - .map(|n| format!("the material \"{n}\"")) - .unwrap_or_else(|| "an unnamed material".to_string()); - let material_index = material - .index() - .map(|i| format!("index {i}")) - .unwrap_or_else(|| "default".to_string()); - warn!( - "Only 2 UV Channels are supported, but {material_name} ({material_index}) \ - has the TEXCOORD attribute {} on texture kind {texture_kind}, which will fallback to 0.", - tex_coord, - ); - UvChannel::Uv0 - } - } -} - -fn convert_texture_transform_to_affine2(texture_transform: TextureTransform) -> Affine2 { - Affine2::from_scale_angle_translation( - texture_transform.scale().into(), - -texture_transform.rotation(), - texture_transform.offset().into(), - ) -} - -fn warn_on_differing_texture_transforms( - material: &Material, - info: &Info, - texture_transform: Affine2, - texture_kind: &str, -) { - let has_differing_texture_transform = info - .texture_transform() - .map(convert_texture_transform_to_affine2) - .is_some_and(|t| t != texture_transform); - if has_differing_texture_transform { - let material_name = material - .name() - .map(|n| format!("the material \"{n}\"")) - .unwrap_or_else(|| "an unnamed material".to_string()); - let texture_name = info - .texture() - .name() - .map(|n| format!("its {texture_kind} texture \"{n}\"")) - .unwrap_or_else(|| format!("its unnamed {texture_kind} texture")); - let material_index = material - .index() - .map(|i| format!("index {i}")) - .unwrap_or_else(|| "default".to_string()); - warn!( - "Only texture transforms on base color textures are supported, but {material_name} ({material_index}) \ - has a texture transform on {texture_name} (index {}), which will be ignored.", info.texture().index() - ); - } -} - /// Loads a glTF node. -#[expect( - clippy::result_large_err, - reason = "`GltfError` is only barely past the threshold for large errors." +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "`GltfError` is only barely past the threshold for large errors." + ) )] fn load_node( gltf_node: &Node, @@ -1535,7 +1372,7 @@ fn load_node( // append primitives for primitive in mesh.primitives() { let material = primitive.material(); - let material_label = material_label(&material, is_scale_inverted); + let material_label = material_label(&material, is_scale_inverted).to_string(); // This will make sure we load the default material now since it would not have been // added when iterating over all the gltf materials (since the default material is @@ -1729,164 +1566,6 @@ fn load_node( } } -fn primitive_name(mesh: &gltf::Mesh, primitive: &Primitive) -> String { - let mesh_name = mesh.name().unwrap_or("Mesh"); - if mesh.primitives().len() > 1 { - format!("{}.{}", mesh_name, primitive.index()) - } else { - mesh_name.to_string() - } -} - -/// Returns the label for the `material`. 
-fn material_label(material: &Material, is_scale_inverted: bool) -> String { - if let Some(index) = material.index() { - GltfAssetLabel::Material { - index, - is_scale_inverted, - } - .to_string() - } else { - GltfAssetLabel::DefaultMaterial.to_string() - } -} - -fn texture_handle(load_context: &mut LoadContext, texture: &gltf::Texture) -> Handle { - match texture.source().source() { - Source::View { .. } => { - load_context.get_label_handle(GltfAssetLabel::Texture(texture.index()).to_string()) - } - Source::Uri { uri, .. } => { - let uri = percent_encoding::percent_decode_str(uri) - .decode_utf8() - .unwrap(); - let uri = uri.as_ref(); - if let Ok(_data_uri) = DataUri::parse(uri) { - load_context.get_label_handle(GltfAssetLabel::Texture(texture.index()).to_string()) - } else { - let parent = load_context.path().parent().unwrap(); - let image_path = parent.join(uri); - load_context.load(image_path) - } - } - } -} - -/// Given a [`json::texture::Info`], returns the handle of the texture that this -/// refers to. -/// -/// This is a low-level function only used when the `gltf` crate has no support -/// for an extension, forcing us to parse its texture references manually. -#[cfg(any( - feature = "pbr_anisotropy_texture", - feature = "pbr_multi_layer_material_textures", - feature = "pbr_specular_textures" -))] -fn texture_handle_from_info( - load_context: &mut LoadContext, - document: &Document, - texture_info: &json::texture::Info, -) -> Handle { - let texture = document - .textures() - .nth(texture_info.index.value()) - .expect("Texture info references a nonexistent texture"); - texture_handle(load_context, &texture) -} - -/// Returns the label for the `scene`. -fn scene_label(scene: &gltf::Scene) -> String { - GltfAssetLabel::Scene(scene.index()).to_string() -} - -/// Return the label for the `skin`. -fn skin_label(skin: &gltf::Skin) -> String { - GltfAssetLabel::Skin(skin.index()).to_string() -} - -/// Return the label for the `inverseBindMatrices` of the node. -fn inverse_bind_matrices_label(skin: &gltf::Skin) -> String { - GltfAssetLabel::InverseBindMatrices(skin.index()).to_string() -} - -/// Extracts the texture sampler data from the glTF texture. -fn texture_sampler(texture: &gltf::Texture) -> ImageSamplerDescriptor { - let gltf_sampler = texture.sampler(); - - ImageSamplerDescriptor { - address_mode_u: texture_address_mode(&gltf_sampler.wrap_s()), - address_mode_v: texture_address_mode(&gltf_sampler.wrap_t()), - - mag_filter: gltf_sampler - .mag_filter() - .map(|mf| match mf { - MagFilter::Nearest => ImageFilterMode::Nearest, - MagFilter::Linear => ImageFilterMode::Linear, - }) - .unwrap_or(ImageSamplerDescriptor::default().mag_filter), - - min_filter: gltf_sampler - .min_filter() - .map(|mf| match mf { - MinFilter::Nearest - | MinFilter::NearestMipmapNearest - | MinFilter::NearestMipmapLinear => ImageFilterMode::Nearest, - MinFilter::Linear - | MinFilter::LinearMipmapNearest - | MinFilter::LinearMipmapLinear => ImageFilterMode::Linear, - }) - .unwrap_or(ImageSamplerDescriptor::default().min_filter), - - mipmap_filter: gltf_sampler - .min_filter() - .map(|mf| match mf { - MinFilter::Nearest - | MinFilter::Linear - | MinFilter::NearestMipmapNearest - | MinFilter::LinearMipmapNearest => ImageFilterMode::Nearest, - MinFilter::NearestMipmapLinear | MinFilter::LinearMipmapLinear => { - ImageFilterMode::Linear - } - }) - .unwrap_or(ImageSamplerDescriptor::default().mipmap_filter), - - ..Default::default() - } -} - -/// Maps the texture address mode from glTF to wgpu. 
-fn texture_address_mode(gltf_address_mode: &WrappingMode) -> ImageAddressMode { - match gltf_address_mode { - WrappingMode::ClampToEdge => ImageAddressMode::ClampToEdge, - WrappingMode::Repeat => ImageAddressMode::Repeat, - WrappingMode::MirroredRepeat => ImageAddressMode::MirrorRepeat, - } -} - -/// Maps the `primitive_topology` from glTF to `wgpu`. -#[expect( - clippy::result_large_err, - reason = "`GltfError` is only barely past the threshold for large errors." -)] -fn get_primitive_topology(mode: Mode) -> Result { - match mode { - Mode::Points => Ok(PrimitiveTopology::PointList), - Mode::Lines => Ok(PrimitiveTopology::LineList), - Mode::LineStrip => Ok(PrimitiveTopology::LineStrip), - Mode::Triangles => Ok(PrimitiveTopology::TriangleList), - Mode::TriangleStrip => Ok(PrimitiveTopology::TriangleStrip), - mode => Err(GltfError::UnsupportedPrimitive { mode }), - } -} - -fn alpha_mode(material: &Material) -> AlphaMode { - match material.alpha_mode() { - gltf::material::AlphaMode::Opaque => AlphaMode::Opaque, - gltf::material::AlphaMode::Mask => AlphaMode::Mask(material.alpha_cutoff().unwrap_or(0.5)), - gltf::material::AlphaMode::Blend => AlphaMode::Blend, - } -} - /// Loads the raw glTF buffer data for a specific glTF file. async fn load_buffers( gltf: &gltf::Gltf, @@ -1928,33 +1607,16 @@ async fn load_buffers( Ok(buffer_data) } -enum ImageOrPath { - Image { - image: Image, - label: GltfAssetLabel, - }, - Path { - path: PathBuf, - is_srgb: bool, - sampler_descriptor: ImageSamplerDescriptor, - }, -} - struct DataUri<'a> { - mime_type: &'a str, - base64: bool, - data: &'a str, -} - -fn split_once(input: &str, delimiter: char) -> Option<(&str, &str)> { - let mut iter = input.splitn(2, delimiter); - Some((iter.next()?, iter.next()?)) + pub mime_type: &'a str, + pub base64: bool, + pub data: &'a str, } impl<'a> DataUri<'a> { fn parse(uri: &'a str) -> Result, ()> { let uri = uri.strip_prefix("data:").ok_or(())?; - let (mime_type, data) = split_once(uri, ',').ok_or(())?; + let (mime_type, data) = Self::split_once(uri, ',').ok_or(())?; let (mime_type, base64) = match mime_type.strip_suffix(";base64") { Some(mime_type) => (mime_type, true), @@ -1975,15 +1637,64 @@ impl<'a> DataUri<'a> { Ok(self.data.as_bytes().to_owned()) } } + + fn split_once(input: &str, delimiter: char) -> Option<(&str, &str)> { + let mut iter = input.splitn(2, delimiter); + Some((iter.next()?, iter.next()?)) + } } -pub(super) struct PrimitiveMorphAttributesIter<'s>( +enum ImageOrPath { + Image { + image: Image, + label: GltfAssetLabel, + }, + Path { + path: PathBuf, + is_srgb: bool, + sampler_descriptor: ImageSamplerDescriptor, + }, +} + +impl ImageOrPath { + // TODO: use the threaded impl on wasm once wasm thread pool doesn't deadlock on it + // See https://github.com/bevyengine/bevy/issues/1924 for more details + // The taskpool use is also avoided when there is only one texture for performance reasons and + // to avoid https://github.com/bevyengine/bevy/pull/2725 + // PERF: could this be a Vec instead? Are gltf texture indices dense? 
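`DataUri::parse` survives the refactor with `split_once` now an associated function. The sketch below shows the same parsing shape (strip the `data:` prefix, split once on `,`, check for a `;base64` suffix) in a simplified form that returns `Option` and uses the standard library's `str::split_once` rather than the loader's helper:

```rust
struct DataUri<'a> {
    mime_type: &'a str,
    base64: bool,
    data: &'a str,
}

fn parse(uri: &str) -> Option<DataUri<'_>> {
    // Expected shape: "data:<mime>[;base64],<payload>"
    let rest = uri.strip_prefix("data:")?;
    let (header, data) = rest.split_once(',')?;
    let (mime_type, base64) = match header.strip_suffix(";base64") {
        Some(mime_type) => (mime_type, true),
        None => (header, false),
    };
    Some(DataUri { mime_type, base64, data })
}

fn main() {
    let uri = parse("data:image/png;base64,iVBORw0KGgo=").unwrap();
    assert_eq!(uri.mime_type, "image/png");
    assert!(uri.base64);
    assert_eq!(uri.data, "iVBORw0KGgo=");
}
```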
+ fn process_loaded_texture( + self, + load_context: &mut LoadContext, + handles: &mut Vec>, + ) { + let handle = match self { + ImageOrPath::Image { label, image } => { + load_context.add_labeled_asset(label.to_string(), image) + } + ImageOrPath::Path { + path, + is_srgb, + sampler_descriptor, + } => load_context + .loader() + .with_settings(move |settings: &mut ImageLoaderSettings| { + settings.is_srgb = is_srgb; + settings.sampler = ImageSampler::Descriptor(sampler_descriptor.clone()); + }) + .load(path), + }; + handles.push(handle); + } +} + +struct PrimitiveMorphAttributesIter<'s>( pub ( Option>, Option>, Option>, ), ); + impl<'s> Iterator for PrimitiveMorphAttributesIter<'s> { type Item = MorphAttributes; @@ -2003,379 +1714,24 @@ impl<'s> Iterator for PrimitiveMorphAttributesIter<'s> { } } +/// A helper structure for `load_node` that contains information about the +/// nearest ancestor animation root. +#[cfg(feature = "bevy_animation")] +#[derive(Clone)] +struct AnimationContext { + /// The nearest ancestor animation root. + pub root: Entity, + /// The path to the animation root. This is used for constructing the + /// animation target UUIDs. + pub path: SmallVec<[Name; 8]>, +} + #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct MorphTargetNames { pub target_names: Vec, } -// A helper structure for `load_node` that contains information about the -// nearest ancestor animation root. -#[cfg(feature = "bevy_animation")] -#[derive(Clone)] -struct AnimationContext { - // The nearest ancestor animation root. - root: Entity, - // The path to the animation root. This is used for constructing the - // animation target UUIDs. - path: SmallVec<[Name; 8]>, -} - -/// Parsed data from the `KHR_materials_clearcoat` extension. -/// -/// See the specification: -/// -#[derive(Default)] -struct ClearcoatExtension { - clearcoat_factor: Option, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_channel: UvChannel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_texture: Option>, - clearcoat_roughness_factor: Option, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_channel: UvChannel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_texture: Option>, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_channel: UvChannel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_texture: Option>, -} - -impl ClearcoatExtension { - #[expect( - clippy::allow_attributes, - reason = "`unused_variables` is not always linted" - )] - #[allow( - unused_variables, - reason = "Depending on what features are used to compile this crate, certain parameters may end up unused." - )] - fn parse( - load_context: &mut LoadContext, - document: &Document, - material: &Material, - ) -> Option { - let extension = material - .extensions()? - .get("KHR_materials_clearcoat")? 
- .as_object()?; - - #[cfg(feature = "pbr_multi_layer_material_textures")] - let (clearcoat_channel, clearcoat_texture) = parse_material_extension_texture( - load_context, - document, - material, - extension, - "clearcoatTexture", - "clearcoat", - ); - - #[cfg(feature = "pbr_multi_layer_material_textures")] - let (clearcoat_roughness_channel, clearcoat_roughness_texture) = - parse_material_extension_texture( - load_context, - document, - material, - extension, - "clearcoatRoughnessTexture", - "clearcoat roughness", - ); - - #[cfg(feature = "pbr_multi_layer_material_textures")] - let (clearcoat_normal_channel, clearcoat_normal_texture) = parse_material_extension_texture( - load_context, - document, - material, - extension, - "clearcoatNormalTexture", - "clearcoat normal", - ); - - Some(ClearcoatExtension { - clearcoat_factor: extension.get("clearcoatFactor").and_then(Value::as_f64), - clearcoat_roughness_factor: extension - .get("clearcoatRoughnessFactor") - .and_then(Value::as_f64), - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_channel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_texture, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_channel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_texture, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_channel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_texture, - }) - } -} - -/// Parsed data from the `KHR_materials_anisotropy` extension. -/// -/// See the specification: -/// -#[derive(Default)] -struct AnisotropyExtension { - anisotropy_strength: Option, - anisotropy_rotation: Option, - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_channel: UvChannel, - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_texture: Option>, -} - -impl AnisotropyExtension { - #[expect( - clippy::allow_attributes, - reason = "`unused_variables` is not always linted" - )] - #[allow( - unused_variables, - reason = "Depending on what features are used to compile this crate, certain parameters may end up unused." - )] - fn parse( - load_context: &mut LoadContext, - document: &Document, - material: &Material, - ) -> Option { - let extension = material - .extensions()? - .get("KHR_materials_anisotropy")? - .as_object()?; - - #[cfg(feature = "pbr_anisotropy_texture")] - let (anisotropy_channel, anisotropy_texture) = extension - .get("anisotropyTexture") - .and_then(|value| value::from_value::(value.clone()).ok()) - .map(|json_info| { - ( - get_uv_channel(material, "anisotropy", json_info.tex_coord), - texture_handle_from_info(load_context, document, &json_info), - ) - }) - .unzip(); - - Some(AnisotropyExtension { - anisotropy_strength: extension.get("anisotropyStrength").and_then(Value::as_f64), - anisotropy_rotation: extension.get("anisotropyRotation").and_then(Value::as_f64), - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_channel: anisotropy_channel.unwrap_or_default(), - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_texture, - }) - } -} - -/// Parsed data from the `KHR_materials_specular` extension. -/// -/// We currently don't parse `specularFactor` and `specularTexture`, since -/// they're incompatible with Filament. -/// -/// Note that the map is a *specular map*, not a *reflectance map*. In Bevy and -/// Filament terms, the reflectance values in the specular map range from [0.0, -/// 0.5], rather than [0.0, 1.0]. 
This is an unfortunate -/// `KHR_materials_specular` specification requirement that stems from the fact -/// that glTF is specified in terms of a specular strength model, not the -/// reflectance model that Filament and Bevy use. A workaround, which is noted -/// in the [`StandardMaterial`] documentation, is to set the reflectance value -/// to 2.0, which spreads the specular map range from [0.0, 1.0] as normal. -/// -/// See the specification: -/// -#[derive(Default)] -struct SpecularExtension { - specular_factor: Option, - #[cfg(feature = "pbr_specular_textures")] - specular_channel: UvChannel, - #[cfg(feature = "pbr_specular_textures")] - specular_texture: Option>, - specular_color_factor: Option<[f64; 3]>, - #[cfg(feature = "pbr_specular_textures")] - specular_color_channel: UvChannel, - #[cfg(feature = "pbr_specular_textures")] - specular_color_texture: Option>, -} - -impl SpecularExtension { - fn parse( - _load_context: &mut LoadContext, - _document: &Document, - material: &Material, - ) -> Option { - let extension = material - .extensions()? - .get("KHR_materials_specular")? - .as_object()?; - - #[cfg(feature = "pbr_specular_textures")] - let (_specular_channel, _specular_texture) = parse_material_extension_texture( - _load_context, - _document, - material, - extension, - "specularTexture", - "specular", - ); - - #[cfg(feature = "pbr_specular_textures")] - let (_specular_color_channel, _specular_color_texture) = parse_material_extension_texture( - _load_context, - _document, - material, - extension, - "specularColorTexture", - "specular color", - ); - - Some(SpecularExtension { - specular_factor: extension.get("specularFactor").and_then(Value::as_f64), - #[cfg(feature = "pbr_specular_textures")] - specular_channel: _specular_channel, - #[cfg(feature = "pbr_specular_textures")] - specular_texture: _specular_texture, - specular_color_factor: extension - .get("specularColorFactor") - .and_then(Value::as_array) - .and_then(|json_array| { - if json_array.len() < 3 { - None - } else { - Some([ - json_array[0].as_f64()?, - json_array[1].as_f64()?, - json_array[2].as_f64()?, - ]) - } - }), - #[cfg(feature = "pbr_specular_textures")] - specular_color_channel: _specular_color_channel, - #[cfg(feature = "pbr_specular_textures")] - specular_color_texture: _specular_color_texture, - }) - } -} - -/// Parses a texture that's part of a material extension block and returns its -/// UV channel and image reference. -#[cfg(any( - feature = "pbr_specular_textures", - feature = "pbr_multi_layer_material_textures" -))] -fn parse_material_extension_texture( - load_context: &mut LoadContext, - document: &Document, - material: &Material, - extension: &Map, - texture_name: &str, - texture_kind: &str, -) -> (UvChannel, Option>) { - match extension - .get(texture_name) - .and_then(|value| value::from_value::(value.clone()).ok()) - { - Some(json_info) => ( - get_uv_channel(material, texture_kind, json_info.tex_coord), - Some(texture_handle_from_info(load_context, document, &json_info)), - ), - None => (UvChannel::default(), None), - } -} - -/// Returns the index (within the `textures` array) of the texture with the -/// given field name in the data for the material extension with the given name, -/// if there is one. -fn material_extension_texture_index( - material: &Material, - extension_name: &str, - texture_field_name: &str, -) -> Option { - Some( - value::from_value::( - material - .extensions()? - .get(extension_name)? - .as_object()? - .get(texture_field_name)? - .clone(), - ) - .ok()? 
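The extension structs in this area read fields straight out of the raw glTF JSON because the `gltf` crate has no typed API for these extensions. A minimal `serde_json` sketch of that lookup chain, with a hand-written JSON value standing in for `material.extensions()`:

```rust
use serde_json::Value;

fn main() {
    // Stand-in for the extensions object attached to a glTF material.
    let extensions: Value = serde_json::json!({
        "KHR_materials_clearcoat": {
            "clearcoatFactor": 0.8,
            "clearcoatRoughnessFactor": 0.25
        }
    });

    // Same shape as the loader's lookup: object -> extension -> field -> f64.
    let clearcoat_factor = extensions
        .get("KHR_materials_clearcoat")
        .and_then(|ext| ext.as_object())
        .and_then(|obj| obj.get("clearcoatFactor"))
        .and_then(Value::as_f64);

    assert_eq!(clearcoat_factor, Some(0.8));
}
```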
- .index - .value(), - ) -} - -/// Returns true if the material needs mesh tangents in order to be successfully -/// rendered. -/// -/// We generate them if this function returns true. -fn material_needs_tangents(material: &Material) -> bool { - if material.normal_texture().is_some() { - return true; - } - - #[cfg(feature = "pbr_multi_layer_material_textures")] - if material_extension_texture_index( - material, - "KHR_materials_clearcoat", - "clearcoatNormalTexture", - ) - .is_some() - { - return true; - } - - false -} - -/// Checks all glTF nodes for cycles, starting at the scene root. -#[expect( - clippy::result_large_err, - reason = "need to be signature compatible with `load_gltf`" -)] -fn check_gltf_for_cycles(gltf: &gltf::Gltf) -> Result<(), GltfError> { - // Initialize with the scene roots. - let mut roots = FixedBitSet::with_capacity(gltf.nodes().len()); - for root in gltf.scenes().flat_map(|scene| scene.nodes()) { - roots.insert(root.index()); - } - - // Check each one. - let mut visited = FixedBitSet::with_capacity(gltf.nodes().len()); - for root in roots.ones() { - check(gltf.nodes().nth(root).unwrap(), &mut visited)?; - } - return Ok(()); - - // Depth first search. - #[expect( - clippy::result_large_err, - reason = "need to be signature compatible with `load_gltf`" - )] - fn check(node: Node, visited: &mut FixedBitSet) -> Result<(), GltfError> { - // Do we have a cycle? - if visited.contains(node.index()) { - return Err(GltfError::CircularChildren(format!( - "glTF nodes form a cycle: {} -> {}", - visited.ones().map(|bit| bit.to_string()).join(" -> "), - node.index() - ))); - } - - // Recurse. - visited.insert(node.index()); - for kid in node.children() { - check(kid, visited)?; - } - visited.remove(node.index()); - - Ok(()) - } -} - #[cfg(test)] mod test { use std::path::Path; @@ -2391,7 +1747,8 @@ mod test { }; use bevy_ecs::{resource::Resource, world::World}; use bevy_log::LogPlugin; - use bevy_render::mesh::{skinning::SkinnedMeshInverseBindposes, MeshPlugin}; + use bevy_mesh::skinning::SkinnedMeshInverseBindposes; + use bevy_render::mesh::MeshPlugin; use bevy_scene::ScenePlugin; fn test_app(dir: Dir) -> App { diff --git a/crates/bevy_gltf/src/vertex_attributes.rs b/crates/bevy_gltf/src/vertex_attributes.rs index 2a9cb2cfab..d4ae811c90 100644 --- a/crates/bevy_gltf/src/vertex_attributes.rs +++ b/crates/bevy_gltf/src/vertex_attributes.rs @@ -1,9 +1,5 @@ -use bevy_platform_support::collections::HashMap; -use bevy_render::{ - mesh::{MeshVertexAttribute, VertexAttributeValues as Values}, - prelude::Mesh, - render_resource::VertexFormat, -}; +use bevy_mesh::{Mesh, MeshVertexAttribute, VertexAttributeValues as Values, VertexFormat}; +use bevy_platform::collections::HashMap; use gltf::{ accessor::{DataType, Dimensions}, mesh::util::{ReadColors, ReadJoints, ReadTexCoords, ReadWeights}, diff --git a/crates/bevy_image/Cargo.toml b/crates/bevy_image/Cargo.toml index fc990448e4..a90a3abb80 100644 --- a/crates/bevy_image/Cargo.toml +++ b/crates/bevy_image/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_image" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides image types for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -11,7 +11,9 @@ keywords = ["bevy"] [features] default = ["bevy_reflect"] -bevy_reflect = ["dep:bevy_reflect", "bevy_math/bevy_reflect"] +# bevy_reflect can't optional as it's needed for TypePath +# this feature only control reflection in bevy_image +bevy_reflect = 
["bevy_math/bevy_reflect"] # Image formats basis-universal = ["dep:basis-universal"] @@ -30,11 +32,7 @@ qoi = ["image/qoi"] tga = ["image/tga"] tiff = ["image/tiff"] webp = ["image/webp"] -serialize = [ - "bevy_reflect", - "bevy_platform_support/serialize", - "bevy_utils/serde", -] +serialize = ["bevy_reflect", "bevy_platform/serialize", "bevy_utils/serde"] # For ktx2 supercompression zlib = ["flate2"] @@ -49,11 +47,9 @@ bevy_color = { path = "../bevy_color", version = "0.16.0-dev", features = [ "wgpu-types", ] } bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", -], optional = true } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false, features = [ +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ "std", ] } @@ -73,7 +69,7 @@ ddsfile = { version = "0.5.2", optional = true } ktx2 = { version = "0.3.0", optional = true } # For ktx2 supercompression flate2 = { version = "1.0.22", optional = true } -ruzstd = { version = "0.7.0", optional = true } +ruzstd = { version = "0.8.0", optional = true } # For transcoding of UASTC/ETC1S universal formats, and for .basis file support basis-universal = { version = "0.3.0", optional = true } tracing = { version = "0.1", default-features = false, features = ["std"] } @@ -81,7 +77,6 @@ half = { version = "2.4.1" } [dev-dependencies] bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } -bevy_sprite = { path = "../bevy_sprite", version = "0.16.0-dev" } [lints] workspace = true diff --git a/crates/bevy_image/src/basis.rs b/crates/bevy_image/src/basis.rs index 7772f38c0b..553140b002 100644 --- a/crates/bevy_image/src/basis.rs +++ b/crates/bevy_image/src/basis.rs @@ -116,7 +116,7 @@ pub fn basis_buffer_to_image( ))) } }; - image.data = transcoded; + image.data = Some(transcoded); Ok(image) } diff --git a/crates/bevy_image/src/compressed_image_saver.rs b/crates/bevy_image/src/compressed_image_saver.rs index 4bd68fdafc..c2adc2c029 100644 --- a/crates/bevy_image/src/compressed_image_saver.rs +++ b/crates/bevy_image/src/compressed_image_saver.rs @@ -11,6 +11,8 @@ pub struct CompressedImageSaver; pub enum CompressedImageSaverError { #[error(transparent)] Io(#[from] std::io::Error), + #[error("Cannot compress an uninitialized image")] + UninitializedImage, } impl AssetSaver for CompressedImageSaver { @@ -42,7 +44,10 @@ impl AssetSaver for CompressedImageSaver { let mut source_image = compressor_params.source_image_mut(0); let size = image.size(); - source_image.init(&image.data, size.x, size.y, 4); + let Some(ref data) = image.data else { + return Err(CompressedImageSaverError::UninitializedImage); + }; + source_image.init(data, size.x, size.y, 4); let mut compressor = basis_universal::Compressor::new(4); #[expect( diff --git a/crates/bevy_image/src/dds.rs b/crates/bevy_image/src/dds.rs index a88120d336..8dc58ad482 100644 --- a/crates/bevy_image/src/dds.rs +++ b/crates/bevy_image/src/dds.rs @@ -8,11 +8,10 @@ use wgpu_types::{ #[cfg(debug_assertions)] use {bevy_utils::once, tracing::warn}; -use super::{CompressedImageFormats, Image, TextureError}; +use super::{CompressedImageFormats, Image, TextureError, TranscodeFormat}; #[cfg(feature = "dds")] pub fn dds_buffer_to_image( - #[cfg(debug_assertions)] name: String, 
buffer: &[u8], supported_compressed_formats: CompressedImageFormats, is_srgb: bool, @@ -20,7 +19,18 @@ pub fn dds_buffer_to_image( let mut cursor = Cursor::new(buffer); let dds = Dds::read(&mut cursor) .map_err(|error| TextureError::InvalidData(format!("Failed to parse DDS file: {error}")))?; - let texture_format = dds_format_to_texture_format(&dds, is_srgb)?; + let (texture_format, transcode_format) = match dds_format_to_texture_format(&dds, is_srgb) { + Ok(format) => (format, None), + Err(TextureError::FormatRequiresTranscodingError(TranscodeFormat::Rgb8)) => { + let format = if is_srgb { + TextureFormat::Bgra8UnormSrgb + } else { + TextureFormat::Bgra8Unorm + }; + (format, Some(TranscodeFormat::Rgb8)) + } + Err(error) => return Err(error), + }; if !supported_compressed_formats.supports(texture_format) { return Err(TextureError::UnsupportedTextureFormat(format!( "Format not supported by this GPU: {texture_format:?}", @@ -54,10 +64,7 @@ pub fn dds_buffer_to_image( let mip_map_level = match dds.get_num_mipmap_levels() { 0 => { #[cfg(debug_assertions)] - once!(warn!( - "Mipmap levels for texture {} are 0, bumping them to 1", - name - )); + once!(warn!("Mipmap levels for texture are 0, bumping them to 1",)); 1 } t => t, @@ -66,10 +73,14 @@ pub fn dds_buffer_to_image( image.texture_descriptor.format = texture_format; image.texture_descriptor.dimension = if dds.get_depth() > 1 { TextureDimension::D3 - } else if image.is_compressed() || dds.get_height() > 1 { - TextureDimension::D2 - } else { + // 1x1 textures should generally be interpreted as solid 2D + } else if ((dds.get_width() > 1 || dds.get_height() > 1) + && !(dds.get_width() > 1 && dds.get_height() > 1)) + && !image.is_compressed() + { TextureDimension::D1 + } else { + TextureDimension::D2 }; if is_cubemap { let dimension = if image.texture_descriptor.size.depth_or_array_layers > 6 { @@ -82,7 +93,29 @@ pub fn dds_buffer_to_image( ..Default::default() }); } - image.data = dds.data; + + // DDS mipmap layout is directly compatible with wgpu's layout (Slice -> Face -> Mip): + // https://learn.microsoft.com/fr-fr/windows/win32/direct3ddds/dx-graphics-dds-reference + image.data = if let Some(transcode_format) = transcode_format { + match transcode_format { + TranscodeFormat::Rgb8 => { + let data = dds + .data + .chunks_exact(3) + .flat_map(|pixel| [pixel[0], pixel[1], pixel[2], u8::MAX]) + .collect(); + Some(data) + } + _ => { + return Err(TextureError::TranscodeError(format!( + "unsupported transcode from {transcode_format:?} to {texture_format:?}" + ))) + } + } + } else { + Some(dds.data) + }; + Ok(image) } @@ -108,6 +141,9 @@ pub fn dds_format_to_texture_format( TextureFormat::Bgra8Unorm } } + D3DFormat::R8G8B8 => { + return Err(TextureError::FormatRequiresTranscodingError(TranscodeFormat::Rgb8)); + }, D3DFormat::G16R16 => TextureFormat::Rg16Uint, D3DFormat::A2B10G10R10 => TextureFormat::Rgb10a2Unorm, D3DFormat::A8L8 => TextureFormat::Rg8Uint, @@ -149,7 +185,6 @@ pub fn dds_format_to_texture_format( // FIXME: Map to argb format and user has to know to ignore the alpha channel? 
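The DDS hunk above widens 3-byte `R8G8B8` texels to 4 bytes by appending an opaque alpha, since wgpu exposes no 24-bit formats. A tiny sketch of that `chunks_exact` + `flat_map` expansion over a plain byte buffer:

```rust
fn expand_to_four_channels(data: &[u8]) -> Vec<u8> {
    data.chunks_exact(3)
        // Keep the three stored channels and append a fully opaque alpha byte.
        .flat_map(|texel| [texel[0], texel[1], texel[2], u8::MAX])
        .collect()
}

fn main() {
    let two_texels = [10u8, 20, 30, 40, 50, 60];
    assert_eq!(
        expand_to_four_channels(&two_texels),
        vec![10, 20, 30, 255, 40, 50, 60, 255]
    );
}
```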
| D3DFormat::X8B8G8R8 | D3DFormat::A2R10G10B10 - | D3DFormat::R8G8B8 | D3DFormat::X1R5G5B5 | D3DFormat::A4R4G4B4 | D3DFormat::X4R4G4B4 @@ -370,10 +405,10 @@ mod test { 0x49, 0x92, 0x24, 0x16, 0x95, 0xae, 0x42, 0xfc, 0, 0xaa, 0x55, 0xff, 0xff, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0xd8, 0xad, 0xae, 0x42, 0xaf, 0x0a, 0xaa, 0x55, ]; - let r = dds_buffer_to_image("".into(), &buffer, CompressedImageFormats::BC, true); + let r = dds_buffer_to_image(&buffer, CompressedImageFormats::BC, true); assert!(r.is_ok()); if let Ok(r) = r { - fake_wgpu_create_texture_with_data(&r.texture_descriptor, &r.data); + fake_wgpu_create_texture_with_data(&r.texture_descriptor, r.data.as_ref().unwrap()); } } } diff --git a/crates/bevy_image/src/dynamic_texture_atlas_builder.rs b/crates/bevy_image/src/dynamic_texture_atlas_builder.rs index 8944e74e74..e8b812194a 100644 --- a/crates/bevy_image/src/dynamic_texture_atlas_builder.rs +++ b/crates/bevy_image/src/dynamic_texture_atlas_builder.rs @@ -2,6 +2,20 @@ use crate::{Image, TextureAtlasLayout, TextureFormatPixelInfo as _}; use bevy_asset::RenderAssetUsages; use bevy_math::{URect, UVec2}; use guillotiere::{size2, Allocation, AtlasAllocator}; +use thiserror::Error; +use tracing::error; + +#[derive(Debug, Error)] +pub enum DynamicTextureAtlasBuilderError { + #[error("Couldn't allocate space to add the image requested")] + FailedToAllocateSpace, + /// Attempted to add a texture to an uninitialized atlas + #[error("cannot add texture to uninitialized atlas texture")] + UninitializedAtlas, + /// Attempted to add an uninitialized texture to an atlas + #[error("cannot add uninitialized texture to atlas")] + UninitializedSourceTexture, +} /// Helper utility to update [`TextureAtlasLayout`] on the fly. /// @@ -42,7 +56,7 @@ impl DynamicTextureAtlasBuilder { atlas_layout: &mut TextureAtlasLayout, texture: &Image, atlas_texture: &mut Image, - ) -> Option { + ) -> Result { let allocation = self.atlas_allocator.allocate(size2( (texture.width() + self.padding).try_into().unwrap(), (texture.height() + self.padding).try_into().unwrap(), @@ -53,12 +67,12 @@ impl DynamicTextureAtlasBuilder { "The atlas_texture image must have the RenderAssetUsages::MAIN_WORLD usage flag set" ); - self.place_texture(atlas_texture, allocation, texture); + self.place_texture(atlas_texture, allocation, texture)?; let mut rect: URect = to_rect(allocation.rectangle); rect.max = rect.max.saturating_sub(UVec2::splat(self.padding)); - Some(atlas_layout.add_texture(rect)) + Ok(atlas_layout.add_texture(rect)) } else { - None + Err(DynamicTextureAtlasBuilderError::FailedToAllocateSpace) } } @@ -67,7 +81,7 @@ impl DynamicTextureAtlasBuilder { atlas_texture: &mut Image, allocation: Allocation, texture: &Image, - ) { + ) -> Result<(), DynamicTextureAtlasBuilderError> { let mut rect = allocation.rectangle; rect.max.x -= self.padding as i32; rect.max.y -= self.padding as i32; @@ -75,14 +89,20 @@ impl DynamicTextureAtlasBuilder { let rect_width = rect.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); + let Some(ref mut atlas_data) = atlas_texture.data else { + return Err(DynamicTextureAtlasBuilderError::UninitializedAtlas); + }; + let Some(ref data) = texture.data else { + return Err(DynamicTextureAtlasBuilderError::UninitializedSourceTexture); + }; for (texture_y, bound_y) in (rect.min.y..rect.max.y).map(|i| i as usize).enumerate() { let begin = (bound_y * atlas_width + rect.min.x as usize) * format_size; let end = begin + rect_width * format_size; let texture_begin = texture_y * 
rect_width * format_size; let texture_end = texture_begin + rect_width * format_size; - atlas_texture.data[begin..end] - .copy_from_slice(&texture.data[texture_begin..texture_end]); + atlas_data[begin..end].copy_from_slice(&data[texture_begin..texture_end]); } + Ok(()) } } diff --git a/crates/bevy_image/src/image.rs b/crates/bevy_image/src/image.rs index cc89cae05a..41b698b78d 100644 --- a/crates/bevy_image/src/image.rs +++ b/crates/bevy_image/src/image.rs @@ -4,6 +4,8 @@ use super::basis::*; use super::dds::*; #[cfg(feature = "ktx2")] use super::ktx2::*; +#[cfg(not(feature = "bevy_reflect"))] +use bevy_reflect::TypePath; #[cfg(feature = "bevy_reflect")] use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -13,6 +15,7 @@ use bevy_math::{AspectRatio, UVec2, UVec3, Vec2}; use core::hash::Hash; use serde::{Deserialize, Serialize}; use thiserror::Error; +use tracing::warn; use wgpu_types::{ AddressMode, CompareFunction, Extent3d, Features, FilterMode, SamplerBorderColor, SamplerDescriptor, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages, @@ -335,10 +338,15 @@ impl ImageFormat { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(opaque, Default, Debug) + reflect(opaque, Default, Debug, Clone) )] +#[cfg_attr(not(feature = "bevy_reflect"), derive(TypePath))] pub struct Image { - pub data: Vec, + /// Raw pixel data. + /// If the image is being used as a storage texture which doesn't need to be initialized by the + /// CPU, then this should be `None` + /// Otherwise, it should always be `Some` + pub data: Option>, // TODO: this nesting makes accessing Image metadata verbose. Either flatten out descriptor or add accessors pub texture_descriptor: TextureDescriptor, &'static [TextureFormat]>, /// The [`ImageSampler`] to use during rendering. 
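The dynamic atlas builder above copies the source texture into the atlas row by row, computing byte offsets from the atlas width, the target rectangle, and the per-texel size. A standalone sketch of that offset arithmetic with a hypothetical `blit` helper (one byte per texel in the usage below):

```rust
/// Copy a `w_src x h_src` tile of `bytes_per_pixel`-sized texels into a larger
/// `w_dst`-wide buffer at pixel offset (`x`, `y`), one row at a time.
fn blit(
    dst: &mut [u8],
    src: &[u8],
    (w_dst, _h_dst): (usize, usize),
    (w_src, h_src): (usize, usize),
    (x, y): (usize, usize),
    bytes_per_pixel: usize,
) {
    for row in 0..h_src {
        let dst_begin = ((y + row) * w_dst + x) * bytes_per_pixel;
        let src_begin = row * w_src * bytes_per_pixel;
        let len = w_src * bytes_per_pixel;
        dst[dst_begin..dst_begin + len].copy_from_slice(&src[src_begin..src_begin + len]);
    }
}

fn main() {
    // 4x4 one-byte-per-pixel destination, 2x2 source placed at (1, 1).
    let mut dst = vec![0u8; 16];
    let src = vec![9u8; 4];
    blit(&mut dst, &src, (4, 4), (2, 2), (1, 1), 1);
    assert_eq!(dst[5], 9);
    assert_eq!(dst[6], 9);
    assert_eq!(dst[9], 9);
    assert_eq!(dst[10], 9);
    assert_eq!(dst[0], 0);
}
```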
@@ -691,28 +699,9 @@ impl From>> for ImageSamplerDescriptor { impl Default for Image { /// default is a 1x1x1 all '1.0' texture fn default() -> Self { - let format = TextureFormat::bevy_default(); - let data = vec![255; format.pixel_size()]; - Image { - data, - texture_descriptor: TextureDescriptor { - size: Extent3d { - width: 1, - height: 1, - depth_or_array_layers: 1, - }, - format, - dimension: TextureDimension::D2, - label: None, - mip_level_count: 1, - sample_count: 1, - usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST, - view_formats: &[], - }, - sampler: ImageSampler::Default, - texture_view_descriptor: None, - asset_usage: RenderAssetUsages::default(), - } + let mut image = Image::default_uninit(); + image.data = Some(vec![255; image.texture_descriptor.format.pixel_size()]); + image } } @@ -734,17 +723,36 @@ impl Image { data.len(), "Pixel data, size and format have to match", ); - let mut image = Self { - data, - ..Default::default() - }; - image.texture_descriptor.dimension = dimension; - image.texture_descriptor.size = size; - image.texture_descriptor.format = format; - image.asset_usage = asset_usage; + let mut image = Image::new_uninit(size, dimension, format, asset_usage); + image.data = Some(data); image } + /// Exactly the same as [`Image::new`], but doesn't initialize the image + pub fn new_uninit( + size: Extent3d, + dimension: TextureDimension, + format: TextureFormat, + asset_usage: RenderAssetUsages, + ) -> Self { + Image { + data: None, + texture_descriptor: TextureDescriptor { + size, + format, + dimension, + label: None, + mip_level_count: 1, + sample_count: 1, + usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST, + view_formats: &[], + }, + sampler: ImageSampler::Default, + texture_view_descriptor: None, + asset_usage, + } + } + /// A transparent white 1x1x1 image. /// /// Contrast to [`Image::default`], which is opaque. 
@@ -755,26 +763,30 @@ impl Image { let format = TextureFormat::bevy_default(); debug_assert!(format.pixel_size() == 4); let data = vec![255, 255, 255, 0]; - Image { - data, - texture_descriptor: TextureDescriptor { - size: Extent3d { - width: 1, - height: 1, - depth_or_array_layers: 1, - }, - format, - dimension: TextureDimension::D2, - label: None, - mip_level_count: 1, - sample_count: 1, - usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST, - view_formats: &[], + Image::new( + Extent3d { + width: 1, + height: 1, + depth_or_array_layers: 1, }, - sampler: ImageSampler::Default, - texture_view_descriptor: None, - asset_usage: RenderAssetUsages::default(), - } + TextureDimension::D2, + data, + format, + RenderAssetUsages::default(), + ) + } + /// Creates a new uninitialized 1x1x1 image + pub fn default_uninit() -> Image { + Image::new_uninit( + Extent3d { + width: 1, + height: 1, + depth_or_array_layers: 1, + }, + TextureDimension::D2, + TextureFormat::bevy_default(), + RenderAssetUsages::default(), + ) } /// Creates a new image from raw binary data and the corresponding metadata, by filling @@ -789,12 +801,7 @@ impl Image { format: TextureFormat, asset_usage: RenderAssetUsages, ) -> Self { - let mut value = Image::default(); - value.texture_descriptor.format = format; - value.texture_descriptor.dimension = dimension; - value.asset_usage = asset_usage; - value.resize(size); - + let byte_len = format.pixel_size() * size.volume(); debug_assert_eq!( pixel.len() % format.pixel_size(), 0, @@ -802,15 +809,12 @@ impl Image { format.pixel_size(), ); debug_assert!( - pixel.len() <= value.data.len(), + pixel.len() <= byte_len, "Fill data must fit within pixel buffer (expected {}B).", - value.data.len(), + byte_len, ); - - for current_pixel in value.data.chunks_exact_mut(pixel.len()) { - current_pixel.copy_from_slice(pixel); - } - value + let data = pixel.iter().copied().cycle().take(byte_len).collect(); + Image::new(size, dimension, data, format, asset_usage) } /// Returns the width of a 2D image. @@ -849,10 +853,14 @@ impl Image { /// Does not properly resize the contents of the image, but only its internal `data` buffer. pub fn resize(&mut self, size: Extent3d) { self.texture_descriptor.size = size; - self.data.resize( - size.volume() * self.texture_descriptor.format.pixel_size(), - 0, - ); + if let Some(ref mut data) = self.data { + data.resize( + size.volume() * self.texture_descriptor.format.pixel_size(), + 0, + ); + } else { + warn!("Resized an uninitialized image. Directly modify image.texture_descriptor.size instead"); + } } /// Changes the `size`, asserting that the total number of data elements (pixels) remains the @@ -923,16 +931,11 @@ impl Image { /// Load a bytes buffer in a [`Image`], according to type `image_type`, using the `image` /// crate pub fn from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] name: String, buffer: &[u8], image_type: ImageType, - #[expect( - clippy::allow_attributes, - reason = "`unused_variables` may not always lint" - )] - #[allow( - unused_variables, - reason = "`supported_compressed_formats` is needed where the image format is `Basis`, `Dds`, or `Ktx2`; if these are disabled, then `supported_compressed_formats` is unused." 
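The fill constructor above now builds its buffer by cycling the fill texel with an iterator rather than resizing first and copying chunk by chunk. A minimal sketch of that pattern in isolation:

```rust
fn fill_with_texel(texel: &[u8], texel_count: usize) -> Vec<u8> {
    // Repeat the texel bytes endlessly and take exactly one buffer's worth.
    texel
        .iter()
        .copied()
        .cycle()
        .take(texel.len() * texel_count)
        .collect()
}

fn main() {
    // A 2x2 image filled with a single RGBA texel.
    let data = fill_with_texel(&[1, 2, 3, 4], 4);
    assert_eq!(data.len(), 16);
    assert_eq!(&data[4..8], &[1, 2, 3, 4]);
}
```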
+ #[cfg_attr( + not(any(feature = "basis-universal", feature = "dds", feature = "ktx2")), + expect(unused_variables, reason = "only used with certain features") )] supported_compressed_formats: CompressedImageFormats, is_srgb: bool, @@ -953,13 +956,7 @@ impl Image { basis_buffer_to_image(buffer, supported_compressed_formats, is_srgb)? } #[cfg(feature = "dds")] - ImageFormat::Dds => dds_buffer_to_image( - #[cfg(debug_assertions)] - name, - buffer, - supported_compressed_formats, - is_srgb, - )?, + ImageFormat::Dds => dds_buffer_to_image(buffer, supported_compressed_formats, is_srgb)?, #[cfg(feature = "ktx2")] ImageFormat::Ktx2 => { ktx2_buffer_to_image(buffer, supported_compressed_formats, is_srgb)? @@ -1035,16 +1032,18 @@ impl Image { #[inline(always)] pub fn pixel_bytes(&self, coords: UVec3) -> Option<&[u8]> { let len = self.texture_descriptor.format.pixel_size(); + let data = self.data.as_ref()?; self.pixel_data_offset(coords) - .map(|start| &self.data[start..(start + len)]) + .map(|start| &data[start..(start + len)]) } /// Get a mutable reference to the data bytes where a specific pixel's value is stored #[inline(always)] pub fn pixel_bytes_mut(&mut self, coords: UVec3) -> Option<&mut [u8]> { let len = self.texture_descriptor.format.pixel_size(); - self.pixel_data_offset(coords) - .map(|start| &mut self.data[start..(start + len)]) + let offset = self.pixel_data_offset(coords); + let data = self.data.as_mut()?; + offset.map(|start| &mut data[start..(start + len)]) } /// Read the color of a specific pixel (1D texture). @@ -1484,19 +1483,20 @@ pub enum DataFormat { Rg, } +/// Texture data need to be transcoded from this format for use with `wgpu`. #[derive(Clone, Copy, Debug)] pub enum TranscodeFormat { Etc1s, Uastc(DataFormat), - // Has to be transcoded to R8Unorm for use with `wgpu` + // Has to be transcoded to R8Unorm for use with `wgpu`. R8UnormSrgb, - // Has to be transcoded to R8G8Unorm for use with `wgpu` + // Has to be transcoded to R8G8Unorm for use with `wgpu`. Rg8UnormSrgb, - // Has to be transcoded to Rgba8 for use with `wgpu` + // Has to be transcoded to Rgba8 for use with `wgpu`. Rgb8, } -/// An error that occurs when accessing specific pixels in a texture +/// An error that occurs when accessing specific pixels in a texture. #[derive(Error, Debug)] pub enum TextureAccessError { #[error("out of bounds (x: {x}, y: {y}, z: {z})")] @@ -1507,25 +1507,34 @@ pub enum TextureAccessError { WrongDimension, } -/// An error that occurs when loading a texture +/// An error that occurs when loading a texture. #[derive(Error, Debug)] pub enum TextureError { + /// Image MIME type is invalid. #[error("invalid image mime type: {0}")] InvalidImageMimeType(String), + /// Image extension is invalid. #[error("invalid image extension: {0}")] InvalidImageExtension(String), + /// Failed to load an image. #[error("failed to load an image: {0}")] ImageError(#[from] image::ImageError), + /// Texture format isn't supported. #[error("unsupported texture format: {0}")] UnsupportedTextureFormat(String), + /// Supercompression isn't supported. #[error("supercompression not supported: {0}")] SuperCompressionNotSupported(String), - #[error("failed to load an image: {0}")] + /// Failed to decompress an image. + #[error("failed to decompress an image: {0}")] SuperDecompressionError(String), + /// Invalid data. #[error("invalid data: {0}")] InvalidData(String), + /// Transcode error. #[error("transcode error: {0}")] TranscodeError(String), + /// Format requires transcoding. 
#[error("format requires transcoding: {0:?}")] FormatRequiresTranscodingError(TranscodeFormat), /// Only cubemaps with six faces are supported. diff --git a/crates/bevy_image/src/image_loader.rs b/crates/bevy_image/src/image_loader.rs index 949ee78e49..0ef1213b46 100644 --- a/crates/bevy_image/src/image_loader.rs +++ b/crates/bevy_image/src/image_loader.rs @@ -81,7 +81,7 @@ impl ImageLoader { } } -#[derive(Serialize, Deserialize, Default, Debug)] +#[derive(Serialize, Deserialize, Default, Debug, Clone)] pub enum ImageFormatSetting { #[default] FromExtension, @@ -89,7 +89,7 @@ pub enum ImageFormatSetting { Guess, } -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct ImageLoaderSettings { pub format: ImageFormatSetting, pub is_srgb: bool, @@ -150,8 +150,6 @@ impl AssetLoader for ImageLoader { } }; Ok(Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - load_context.path().display().to_string(), &bytes, image_type, self.supported_compressed_formats, diff --git a/crates/bevy_image/src/image_texture_conversion.rs b/crates/bevy_image/src/image_texture_conversion.rs index 7956e810cb..1eb3b78b1e 100644 --- a/crates/bevy_image/src/image_texture_conversion.rs +++ b/crates/bevy_image/src/image_texture_conversion.rs @@ -170,22 +170,26 @@ impl Image { /// /// To convert [`Image`] to a different format see: [`Image::convert`]. pub fn try_into_dynamic(self) -> Result { + let width = self.width(); + let height = self.height(); + let Some(data) = self.data else { + return Err(IntoDynamicImageError::UninitializedImage); + }; match self.texture_descriptor.format { - TextureFormat::R8Unorm => ImageBuffer::from_raw(self.width(), self.height(), self.data) - .map(DynamicImage::ImageLuma8), + TextureFormat::R8Unorm => { + ImageBuffer::from_raw(width, height, data).map(DynamicImage::ImageLuma8) + } TextureFormat::Rg8Unorm => { - ImageBuffer::from_raw(self.width(), self.height(), self.data) - .map(DynamicImage::ImageLumaA8) + ImageBuffer::from_raw(width, height, data).map(DynamicImage::ImageLumaA8) } TextureFormat::Rgba8UnormSrgb => { - ImageBuffer::from_raw(self.width(), self.height(), self.data) - .map(DynamicImage::ImageRgba8) + ImageBuffer::from_raw(width, height, data).map(DynamicImage::ImageRgba8) } // This format is commonly used as the format for the swapchain texture // This conversion is added here to support screenshots TextureFormat::Bgra8UnormSrgb | TextureFormat::Bgra8Unorm => { - ImageBuffer::from_raw(self.width(), self.height(), { - let mut data = self.data; + ImageBuffer::from_raw(width, height, { + let mut data = data; for bgra in data.chunks_exact_mut(4) { bgra.swap(0, 2); } @@ -213,6 +217,10 @@ pub enum IntoDynamicImageError { /// Encountered an unknown error during conversion. 
#[error("Failed to convert into {0:?}.")] UnknownConversionError(TextureFormat), + + /// Tried to convert an image that has no texture data + #[error("Image has no texture data")] + UninitializedImage, } #[cfg(test)] diff --git a/crates/bevy_image/src/ktx2.rs b/crates/bevy_image/src/ktx2.rs index 3819a979a2..bffea83d10 100644 --- a/crates/bevy_image/src/ktx2.rs +++ b/crates/bevy_image/src/ktx2.rs @@ -61,7 +61,7 @@ pub fn ktx2_buffer_to_image( #[cfg(feature = "ruzstd")] SupercompressionScheme::Zstandard => { let mut cursor = std::io::Cursor::new(_level_data); - let mut decoder = ruzstd::StreamingDecoder::new(&mut cursor) + let mut decoder = ruzstd::decoding::StreamingDecoder::new(&mut cursor) .map_err(|err| TextureError::SuperDecompressionError(err.to_string()))?; let mut decompressed = Vec::new(); decoder.read_to_end(&mut decompressed).map_err(|err| { @@ -266,7 +266,7 @@ pub fn ktx2_buffer_to_image( // error cases have been handled let mut image = Image::default(); image.texture_descriptor.format = texture_format; - image.data = wgpu_data.into_iter().flatten().collect::>(); + image.data = Some(wgpu_data.into_iter().flatten().collect::>()); image.texture_descriptor.size = Extent3d { width, height, diff --git a/crates/bevy_image/src/texture_atlas.rs b/crates/bevy_image/src/texture_atlas.rs index 36e2acd864..b5b68b0c41 100644 --- a/crates/bevy_image/src/texture_atlas.rs +++ b/crates/bevy_image/src/texture_atlas.rs @@ -1,7 +1,9 @@ use bevy_app::prelude::*; use bevy_asset::{Asset, AssetApp as _, AssetId, Assets, Handle}; -use bevy_math::{URect, UVec2}; -use bevy_platform_support::collections::HashMap; +use bevy_math::{Rect, URect, UVec2}; +use bevy_platform::collections::HashMap; +#[cfg(not(feature = "bevy_reflect"))] +use bevy_reflect::TypePath; #[cfg(feature = "bevy_reflect")] use bevy_reflect::{std_traits::ReflectDefault, Reflect}; #[cfg(feature = "serialize")] @@ -51,7 +53,7 @@ impl TextureAtlasSources { }) } - /// Retrieves the texture *section* rectangle of the given `texture` handle. + /// Retrieves the texture *section* rectangle of the given `texture` handle in pixels. pub fn texture_rect( &self, layout: &TextureAtlasLayout, @@ -59,6 +61,20 @@ impl TextureAtlasSources { ) -> Option { layout.textures.get(self.texture_index(texture)?).cloned() } + + /// Retrieves the texture *section* rectangle of the given `texture` handle in UV coordinates. + /// These are within the range [0..1], as a fraction of the entire texture atlas' size. + pub fn uv_rect( + &self, + layout: &TextureAtlasLayout, + texture: impl Into>, + ) -> Option { + self.texture_rect(layout, texture).map(|rect| { + let rect = rect.as_rect(); + let size = layout.size.as_vec2(); + Rect::from_corners(rect.min / size, rect.max / size) + }) + } } /// Stores a map used to lookup the position of a texture in a [`TextureAtlas`]. @@ -73,12 +89,17 @@ impl TextureAtlasSources { /// /// [`TextureAtlasBuilder`]: crate::TextureAtlasBuilder #[derive(Asset, PartialEq, Eq, Debug, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( feature = "serialize", derive(serde::Serialize, serde::Deserialize), reflect(Serialize, Deserialize) )] +#[cfg_attr(not(feature = "bevy_reflect"), derive(TypePath))] pub struct TextureAtlasLayout { /// Total size of texture atlas. 
pub size: UVec2, @@ -186,7 +207,7 @@ impl TextureAtlasLayout { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Default, Debug, PartialEq, Hash) + reflect(Default, Debug, PartialEq, Hash, Clone) )] pub struct TextureAtlas { /// Texture atlas layout handle diff --git a/crates/bevy_image/src/texture_atlas_builder.rs b/crates/bevy_image/src/texture_atlas_builder.rs index a207b318f7..2f23331c8c 100644 --- a/crates/bevy_image/src/texture_atlas_builder.rs +++ b/crates/bevy_image/src/texture_atlas_builder.rs @@ -1,6 +1,6 @@ use bevy_asset::{AssetId, RenderAssetUsages}; use bevy_math::{URect, UVec2}; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use rectangle_pack::{ contains_smallest_box, pack_rects, volume_heuristic, GroupedRectsToPlace, PackedLocation, RectToInsert, TargetBin, @@ -18,6 +18,12 @@ pub enum TextureAtlasBuilderError { NotEnoughSpace, #[error("added a texture with the wrong format in an atlas")] WrongFormat, + /// Attempted to add a texture to an uninitialized atlas + #[error("cannot add texture to uninitialized atlas texture")] + UninitializedAtlas, + /// Attempted to add an uninitialized texture to an atlas + #[error("cannot add uninitialized texture to atlas")] + UninitializedSourceTexture, } #[derive(Debug)] @@ -105,7 +111,7 @@ impl<'a> TextureAtlasBuilder<'a> { texture: &Image, packed_location: &PackedLocation, padding: UVec2, - ) { + ) -> TextureAtlasBuilderResult<()> { let rect_width = (packed_location.width() - padding.x) as usize; let rect_height = (packed_location.height() - padding.y) as usize; let rect_x = packed_location.x() as usize; @@ -113,14 +119,20 @@ impl<'a> TextureAtlasBuilder<'a> { let atlas_width = atlas_texture.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); + let Some(ref mut atlas_data) = atlas_texture.data else { + return Err(TextureAtlasBuilderError::UninitializedAtlas); + }; + let Some(ref data) = texture.data else { + return Err(TextureAtlasBuilderError::UninitializedSourceTexture); + }; for (texture_y, bound_y) in (rect_y..rect_y + rect_height).enumerate() { let begin = (bound_y * atlas_width + rect_x) * format_size; let end = begin + rect_width * format_size; let texture_begin = texture_y * rect_width * format_size; let texture_end = texture_begin + rect_width * format_size; - atlas_texture.data[begin..end] - .copy_from_slice(&texture.data[texture_begin..texture_end]); + atlas_data[begin..end].copy_from_slice(&data[texture_begin..texture_end]); } + Ok(()) } fn copy_converted_texture( @@ -128,9 +140,9 @@ impl<'a> TextureAtlasBuilder<'a> { atlas_texture: &mut Image, texture: &Image, packed_location: &PackedLocation, - ) { + ) -> TextureAtlasBuilderResult<()> { if self.format == texture.texture_descriptor.format { - Self::copy_texture_to_atlas(atlas_texture, texture, packed_location, self.padding); + Self::copy_texture_to_atlas(atlas_texture, texture, packed_location, self.padding)?; } else if let Some(converted_texture) = texture.convert(self.format) { debug!( "Converting texture from '{:?}' to '{:?}'", @@ -141,13 +153,14 @@ impl<'a> TextureAtlasBuilder<'a> { &converted_texture, packed_location, self.padding, - ); + )?; } else { error!( "Error converting texture from '{:?}' to '{:?}', ignoring", texture.texture_descriptor.format, self.format ); } + Ok(()) } /// Consumes the builder, and returns the newly created texture atlas and @@ -160,12 +173,11 @@ impl<'a> TextureAtlasBuilder<'a> { /// # Usage /// /// ```rust - /// # use bevy_sprite::prelude::*; 
/// # use bevy_ecs::prelude::*; /// # use bevy_asset::*; /// # use bevy_image::prelude::*; /// - /// fn my_system(mut commands: Commands, mut textures: ResMut>, mut layouts: ResMut>) { + /// fn my_system(mut textures: ResMut>, mut layouts: ResMut>) { /// // Declare your builder /// let mut builder = TextureAtlasBuilder::default(); /// // Customize it @@ -174,8 +186,6 @@ impl<'a> TextureAtlasBuilder<'a> { /// let (atlas_layout, atlas_sources, texture) = builder.build().unwrap(); /// let texture = textures.add(texture); /// let layout = layouts.add(atlas_layout); - /// // Spawn your sprite - /// commands.spawn(Sprite::from_atlas_image(texture, TextureAtlas::from(layout))); /// } /// ``` /// @@ -274,7 +284,7 @@ impl<'a> TextureAtlasBuilder<'a> { ); return Err(TextureAtlasBuilderError::WrongFormat); } - self.copy_converted_texture(&mut atlas_texture, texture, packed_location); + self.copy_converted_texture(&mut atlas_texture, texture, packed_location)?; } Ok(( diff --git a/crates/bevy_input/Cargo.toml b/crates/bevy_input/Cargo.toml index fbbec96a76..570273a00a 100644 --- a/crates/bevy_input/Cargo.toml +++ b/crates/bevy_input/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_input" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Provides input functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -24,10 +24,10 @@ bevy_reflect = [ ## Adds serialization support through `serde`. serialize = [ "serde", - "smol_str/serde", + "smol_str?/serde", "bevy_ecs/serialize", "bevy_math/serialize", - "bevy_platform_support/serialize", + "bevy_platform/serialize", ] ## Uses the small-string optimization provided by `smol_str`. @@ -44,7 +44,7 @@ std = [ "bevy_math/std", "bevy_utils/std", "bevy_reflect/std", - "bevy_platform_support/std", + "bevy_platform/std", ] ## `critical-section` provides the building blocks for synchronization primitives @@ -53,16 +53,7 @@ critical-section = [ "bevy_app/critical-section", "bevy_ecs/critical-section", "bevy_reflect?/critical-section", - "bevy_platform_support/critical-section", -] - -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "bevy_app/portable-atomic", - "bevy_ecs/portable-atomic", - "bevy_reflect?/portable-atomic", - "bevy_platform_support/portable-atomic", + "bevy_platform/critical-section", ] ## Uses the `libm` maths library instead of the one provided in `std` and `core`. @@ -77,7 +68,7 @@ bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ "glam", ], default-features = false, optional = true } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev", default-features = false } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } # other serde = { version = "1", features = [ diff --git a/crates/bevy_input/src/axis.rs b/crates/bevy_input/src/axis.rs index acdf0135f0..99909762c7 100644 --- a/crates/bevy_input/src/axis.rs +++ b/crates/bevy_input/src/axis.rs @@ -1,7 +1,7 @@ //! The generic axis type. 
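// Illustrative sketch (not part of the patch), referring to the `TextureAtlasBuilder`
// changes above: the copy helpers now return a `Result`, and `build()` can surface
// `UninitializedAtlas` / `UninitializedSourceTexture` when an `Image` has no CPU-side
// data. The match below is a hypothetical way to handle that; it is not from the diff.
use bevy_image::{TextureAtlasBuilder, TextureAtlasBuilderError};

fn build_atlas(mut builder: TextureAtlasBuilder) {
    match builder.build() {
        Ok((layout, sources, image)) => {
            // Add `image` and `layout` to their respective `Assets` collections here.
            let _ = (layout, sources, image);
        }
        Err(TextureAtlasBuilderError::UninitializedSourceTexture) => {
            // A source texture was created without data, e.g. `Image::default_uninit()`.
        }
        Err(err) => panic!("failed to build atlas: {err}"),
    }
}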
use bevy_ecs::resource::Resource; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; use core::hash::Hash; #[cfg(feature = "bevy_reflect")] diff --git a/crates/bevy_input/src/button_input.rs b/crates/bevy_input/src/button_input.rs index a1086edf66..bc28381ab4 100644 --- a/crates/bevy_input/src/button_input.rs +++ b/crates/bevy_input/src/button_input.rs @@ -1,7 +1,7 @@ //! The generic input type. use bevy_ecs::resource::Resource; -use bevy_platform_support::collections::HashSet; +use bevy_platform::collections::HashSet; use core::hash::Hash; #[cfg(feature = "bevy_reflect")] use { @@ -71,7 +71,7 @@ use { /// Reading and checking against the current set of pressed buttons: /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::{prelude::{IntoSystemConfigs, Res, Resource, resource_changed}, schedule::Condition}; +/// # use bevy_ecs::{prelude::{IntoScheduleConfigs, Res, Resource, resource_changed}, schedule::Condition}; /// # use bevy_input::{ButtonInput, prelude::{KeyCode, MouseButton}}; /// /// fn main() { diff --git a/crates/bevy_input/src/common_conditions.rs b/crates/bevy_input/src/common_conditions.rs index 560ea693d9..c1c486e249 100644 --- a/crates/bevy_input/src/common_conditions.rs +++ b/crates/bevy_input/src/common_conditions.rs @@ -6,7 +6,7 @@ use core::hash::Hash; /// /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::prelude::IntoSystemConfigs; +/// # use bevy_ecs::prelude::IntoScheduleConfigs; /// # use bevy_input::{common_conditions::input_toggle_active, prelude::KeyCode}; /// /// fn main() { @@ -25,7 +25,7 @@ use core::hash::Hash; /// you should use a custom resource or a state for that: /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::prelude::{IntoSystemConfigs, Res, ResMut, Resource}; +/// # use bevy_ecs::prelude::{IntoScheduleConfigs, Res, ResMut, Resource}; /// # use bevy_input::{common_conditions::input_just_pressed, prelude::KeyCode}; /// /// #[derive(Resource, Default)] @@ -74,7 +74,7 @@ where /// /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::prelude::IntoSystemConfigs; +/// # use bevy_ecs::prelude::IntoScheduleConfigs; /// # use bevy_input::{common_conditions::input_just_pressed, prelude::KeyCode}; /// fn main() { /// App::new() @@ -104,7 +104,7 @@ where mod tests { use super::*; use crate::prelude::KeyCode; - use bevy_ecs::schedule::{IntoSystemConfigs, Schedule}; + use bevy_ecs::schedule::{IntoScheduleConfigs, Schedule}; fn test_system() {} diff --git a/crates/bevy_input/src/gamepad.rs b/crates/bevy_input/src/gamepad.rs index 2da9916a7e..2b0148909c 100644 --- a/crates/bevy_input/src/gamepad.rs +++ b/crates/bevy_input/src/gamepad.rs @@ -12,12 +12,11 @@ use bevy_ecs::{ entity::Entity, event::{Event, EventReader, EventWriter}, name::Name, - prelude::require, system::{Commands, Query}, }; use bevy_math::ops; use bevy_math::Vec2; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; #[cfg(feature = "bevy_reflect")] use bevy_reflect::{std_traits::ReflectDefault, Reflect}; #[cfg(all(feature = "serialize", feature = "bevy_reflect"))] @@ -34,7 +33,11 @@ use thiserror::Error; /// /// This event is produced by `bevy_input`. 
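// Illustrative sketch (not part of the patch): the doc examples above switch from
// `IntoSystemConfigs` to the renamed `IntoScheduleConfigs` trait; run conditions such
// as `input_just_pressed` are otherwise used exactly as before. Hypothetical minimal setup:
use bevy_app::{App, Update};
use bevy_ecs::prelude::IntoScheduleConfigs;
use bevy_input::{common_conditions::input_just_pressed, prelude::KeyCode};

fn jump() {}

fn configure(app: &mut App) {
    // `run_if` is provided by `IntoScheduleConfigs`, which must now be in scope.
    app.add_systems(Update, jump.run_if(input_just_pressed(KeyCode::Space)));
}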
#[derive(Event, Debug, Clone, PartialEq, From)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -57,7 +60,11 @@ pub enum GamepadEvent { /// /// This event type is used by `bevy_input` to feed its components. #[derive(Event, Debug, Clone, PartialEq, From)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -74,7 +81,11 @@ pub enum RawGamepadEvent { /// [`GamepadButton`] changed event unfiltered by [`GamepadSettings`]. #[derive(Event, Debug, Copy, Clone, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -102,7 +113,11 @@ impl RawGamepadButtonChangedEvent { /// [`GamepadAxis`] changed event unfiltered by [`GamepadSettings`]. #[derive(Event, Debug, Copy, Clone, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -131,7 +146,11 @@ impl RawGamepadAxisChangedEvent { /// A Gamepad connection event. Created when a connection to a gamepad /// is established and when a gamepad is disconnected. #[derive(Event, Debug, Clone, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -166,7 +185,11 @@ impl GamepadConnectionEvent { /// [`GamepadButton`] event triggered by a digital state change. #[derive(Event, Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -194,7 +217,11 @@ impl GamepadButtonStateChangedEvent { /// [`GamepadButton`] event triggered by an analog state change. #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -226,7 +253,11 @@ impl GamepadButtonChangedEvent { /// [`GamepadAxis`] event triggered by an analog state change. 
#[derive(Event, Debug, Clone, Copy, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "bevy_reflect", feature = "serialize"), reflect(Serialize, Deserialize) @@ -335,7 +366,11 @@ pub enum ButtonSettingsError { /// } /// ``` #[derive(Component, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Component))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Component, Default) +)] #[require(GamepadSettings)] pub struct Gamepad { /// The USB vendor ID as assigned by the USB-IF, if available. @@ -535,7 +570,7 @@ impl Default for Gamepad { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -624,7 +659,11 @@ impl GamepadButton { /// This is used to determine which axis has changed its value when receiving a /// gamepad axis event. It is also used in the [`Gamepad`] component. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Hash, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -635,16 +674,16 @@ pub enum GamepadAxis { LeftStickX, /// The vertical value of the left stick. LeftStickY, - /// The value of the left `Z` button. + /// Generally the throttle axis of a HOTAS setup. + /// Refer to [`GamepadButton::LeftTrigger2`] for the analog trigger on a gamepad controller. LeftZ, - /// The horizontal value of the right stick. RightStickX, /// The vertical value of the right stick. RightStickY, - /// The value of the right `Z` button. + /// The yaw of the main joystick, not supported on common gamepads. + /// Refer to [`GamepadButton::RightTrigger2`] for the analog trigger on a gamepad controller. RightZ, - /// Non-standard support for other axis types (i.e. HOTAS sliders, potentiometers, etc). Other(u8), } @@ -666,7 +705,11 @@ impl GamepadAxis { /// Encapsulation over [`GamepadAxis`] and [`GamepadButton`]. // This is done so Gamepad can share a single Axis and simplifies the API by having only one get/get_unclamped method #[derive(Debug, Copy, Clone, Eq, Hash, PartialEq, From)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Hash, PartialEq, Clone) +)] pub enum GamepadInput { /// A [`GamepadAxis`]. Axis(GamepadAxis), @@ -691,7 +734,7 @@ pub enum GamepadInput { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Component) + reflect(Debug, Default, Component, Clone) )] pub struct GamepadSettings { /// The default button settings. 
@@ -772,7 +815,11 @@ impl GamepadSettings { /// /// Allowed values: `0.0 <= ``release_threshold`` <= ``press_threshold`` <= 1.0` #[derive(Debug, PartialEq, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct ButtonSettings { press_threshold: f32, release_threshold: f32, @@ -932,7 +979,11 @@ impl ButtonSettings { /// /// The valid range is `[-1.0, 1.0]`. #[derive(Debug, Clone, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Default, Clone) +)] pub struct AxisSettings { /// Values that are higher than `livezone_upperbound` will be rounded up to 1.0. livezone_upperbound: f32, @@ -1356,7 +1407,11 @@ impl ScaledAxisPosition { /// /// The valid range is from 0.0 to 1.0, inclusive. #[derive(Debug, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct ButtonAxisSettings { /// The high value at which to apply rounding. pub high: f32, @@ -1460,7 +1515,7 @@ pub fn gamepad_connection_system( vendor_id, product_id, } => { - let Some(mut gamepad) = commands.get_entity(id) else { + let Ok(mut gamepad) = commands.get_entity(id) else { warn!("Gamepad {} removed before handling connection event.", id); continue; }; @@ -1475,7 +1530,7 @@ pub fn gamepad_connection_system( info!("Gamepad {} connected.", id); } GamepadConnection::Disconnected => { - let Some(mut gamepad) = commands.get_entity(id) else { + let Ok(mut gamepad) = commands.get_entity(id) else { warn!("Gamepad {} removed before handling disconnection event. You can ignore this if you manually removed it.", id); continue; }; @@ -1494,7 +1549,11 @@ pub fn gamepad_connection_system( // /// The connection status of a gamepad. 
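// Illustrative sketch (not part of the patch): `ButtonSettings` keeps the invariant
// `0.0 <= release_threshold <= press_threshold <= 1.0` documented above. This assumes
// the existing `ButtonSettings::new(press, release)` constructor, which validates the
// thresholds and returns an error if they fall outside the allowed range.
use bevy_input::gamepad::ButtonSettings;

fn strict_trigger() -> ButtonSettings {
    // Treat the button as pressed at 75% travel and released once it drops below 60%.
    ButtonSettings::new(0.75, 0.6).expect("thresholds are within the allowed range")
}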
#[derive(Debug, Clone, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1539,7 +1598,7 @@ pub fn gamepad_event_processing_system( match event { // Connections require inserting/removing components so they are done in a separate system RawGamepadEvent::Connection(send_event) => { - processed_events.send(GamepadEvent::from(send_event.clone())); + processed_events.write(GamepadEvent::from(send_event.clone())); } RawGamepadEvent::Axis(RawGamepadAxisChangedEvent { gamepad, @@ -1559,8 +1618,8 @@ pub fn gamepad_event_processing_system( gamepad_axis.analog.set(axis, filtered_value.raw); let send_event = GamepadAxisChangedEvent::new(gamepad, axis, filtered_value.scaled.to_f32()); - processed_axis_events.send(send_event); - processed_events.send(GamepadEvent::from(send_event)); + processed_axis_events.write(send_event); + processed_events.write(GamepadEvent::from(send_event)); } RawGamepadEvent::Button(RawGamepadButtonChangedEvent { gamepad, @@ -1583,7 +1642,7 @@ pub fn gamepad_event_processing_system( if button_settings.is_released(filtered_value.raw) { // Check if button was previously pressed if gamepad_buttons.pressed(button) { - processed_digital_events.send(GamepadButtonStateChangedEvent::new( + processed_digital_events.write(GamepadButtonStateChangedEvent::new( gamepad, button, ButtonState::Released, @@ -1595,7 +1654,7 @@ pub fn gamepad_event_processing_system( } else if button_settings.is_pressed(filtered_value.raw) { // Check if button was previously not pressed if !gamepad_buttons.pressed(button) { - processed_digital_events.send(GamepadButtonStateChangedEvent::new( + processed_digital_events.write(GamepadButtonStateChangedEvent::new( gamepad, button, ButtonState::Pressed, @@ -1615,8 +1674,8 @@ pub fn gamepad_event_processing_system( button_state, filtered_value.scaled.to_f32(), ); - processed_analog_events.send(send_event); - processed_events.send(GamepadEvent::from(send_event)); + processed_analog_events.write(send_event); + processed_events.write(GamepadEvent::from(send_event)); } } } @@ -1624,7 +1683,11 @@ pub fn gamepad_event_processing_system( /// The intensity at which a gamepad's force-feedback motors may rumble. #[derive(Clone, Copy, Debug, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] pub struct GamepadRumbleIntensity { /// The rumble intensity of the strong gamepad motor. /// @@ -1699,7 +1762,7 @@ impl GamepadRumbleIntensity { /// gamepads: Query>, /// ) { /// for entity in gamepads.iter() { -/// rumble_requests.send(GamepadRumbleRequest::Add { +/// rumble_requests.write(GamepadRumbleRequest::Add { /// gamepad: entity, /// intensity: GamepadRumbleIntensity::MAX, /// duration: Duration::from_secs_f32(0.5), @@ -1712,7 +1775,7 @@ impl GamepadRumbleIntensity { #[doc(alias = "vibration")] #[doc(alias = "vibrate")] #[derive(Event, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Clone))] pub enum GamepadRumbleRequest { /// Add a rumble to the given gamepad. 
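// Illustrative sketch (not part of the patch): `EventWriter::send` is renamed to
// `EventWriter::write` throughout this hunk. A rumble request mirroring the doc
// example above then looks like this (system name is hypothetical):
use core::time::Duration;
use bevy_ecs::prelude::{Entity, EventWriter, Query, With};
use bevy_input::gamepad::{Gamepad, GamepadRumbleIntensity, GamepadRumbleRequest};

fn rumble_all(
    mut requests: EventWriter<GamepadRumbleRequest>,
    gamepads: Query<Entity, With<Gamepad>>,
) {
    for gamepad in gamepads.iter() {
        requests.write(GamepadRumbleRequest::Add {
            gamepad,
            intensity: GamepadRumbleIntensity::MAX,
            duration: Duration::from_secs_f32(0.5),
        });
    }
}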
/// @@ -1764,7 +1827,7 @@ mod tests { use bevy_app::{App, PreUpdate}; use bevy_ecs::entity::Entity; use bevy_ecs::event::Events; - use bevy_ecs::schedule::IntoSystemConfigs; + use bevy_ecs::schedule::IntoScheduleConfigs; fn test_button_axis_settings_filter( settings: ButtonAxisSettings, diff --git a/crates/bevy_input/src/gestures.rs b/crates/bevy_input/src/gestures.rs index 4f540fb139..5cd14d4634 100644 --- a/crates/bevy_input/src/gestures.rs +++ b/crates/bevy_input/src/gestures.rs @@ -18,7 +18,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// - Only available on **`macOS`** and **`iOS`**. /// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -36,7 +40,11 @@ pub struct PinchGesture(pub f32); /// - Only available on **`macOS`** and **`iOS`**. /// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -51,7 +59,11 @@ pub struct RotationGesture(pub f32); /// - Only available on **`macOS`** and **`iOS`**. /// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -65,7 +77,11 @@ pub struct DoubleTapGesture; /// /// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_input/src/keyboard.rs b/crates/bevy_input/src/keyboard.rs index 0525847062..ea5452fb53 100644 --- a/crates/bevy_input/src/keyboard.rs +++ b/crates/bevy_input/src/keyboard.rs @@ -98,7 +98,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Hash) + reflect(Debug, PartialEq, Hash, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -140,7 +140,7 @@ pub struct KeyboardInput { /// OS specific key combination that leads to Bevy window losing focus and not receiving any /// input events #[derive(Event, Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Clone, PartialEq))] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -190,7 
+190,11 @@ pub fn keyboard_input_system( /// - Correctly match key press and release events. /// - On non-web platforms, support assigning keybinds to virtually any key through a UI. #[derive(Debug, Clone, Ord, PartialOrd, Copy, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Hash) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -231,7 +235,7 @@ pub enum NativeKeyCode { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -727,7 +731,7 @@ pub enum KeyCode { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -760,7 +764,7 @@ pub enum NativeKey { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( diff --git a/crates/bevy_input/src/lib.rs b/crates/bevy_input/src/lib.rs index 2da2c89cce..e1119c3d35 100644 --- a/crates/bevy_input/src/lib.rs +++ b/crates/bevy_input/src/lib.rs @@ -164,7 +164,7 @@ impl Plugin for InputPlugin { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( diff --git a/crates/bevy_input/src/mouse.rs b/crates/bevy_input/src/mouse.rs index 977b6e0aea..3a377d9329 100644 --- a/crates/bevy_input/src/mouse.rs +++ b/crates/bevy_input/src/mouse.rs @@ -27,7 +27,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// The event is read inside of the [`mouse_button_input_system`] /// to update the [`ButtonInput`] resource. #[derive(Event, Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -56,7 +60,7 @@ pub struct MouseButtonInput { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -88,7 +92,11 @@ pub enum MouseButton { /// /// [`DeviceEvent::MouseMotion`]: https://docs.rs/winit/latest/winit/event/enum.DeviceEvent.html#variant.MouseMotion #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -106,7 +114,11 @@ pub struct MouseMotion { /// The value of the event can either be interpreted as the amount of lines or the amount of pixels /// to scroll. 
#[derive(Debug, Hash, Clone, Copy, Eq, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -129,7 +141,11 @@ pub enum MouseScrollUnit { /// /// This event is the translated version of the `WindowEvent::MouseWheel` from the `winit` crate. #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -174,7 +190,7 @@ pub fn mouse_button_input_system( #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Resource, PartialEq) + reflect(Debug, Default, Resource, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -195,7 +211,7 @@ pub struct AccumulatedMouseMotion { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Resource, PartialEq) + reflect(Debug, Default, Resource, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( diff --git a/crates/bevy_input/src/touch.rs b/crates/bevy_input/src/touch.rs index e1784c13fd..28f3159d53 100644 --- a/crates/bevy_input/src/touch.rs +++ b/crates/bevy_input/src/touch.rs @@ -7,7 +7,7 @@ use bevy_ecs::{ system::ResMut, }; use bevy_math::Vec2; -use bevy_platform_support::collections::HashMap; +use bevy_platform::collections::HashMap; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; @@ -38,7 +38,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// This event is the translated version of the `WindowEvent::Touch` from the `winit` crate. /// It is available to the end user and can be used for game logic. #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -62,7 +66,11 @@ pub struct TouchInput { /// A force description of a [`Touch`] input. 
#[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -111,7 +119,7 @@ pub enum ForceTouch { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( diff --git a/crates/bevy_input_focus/Cargo.toml b/crates/bevy_input_focus/Cargo.toml index 49aaed4dc8..0b2ca53830 100644 --- a/crates/bevy_input_focus/Cargo.toml +++ b/crates/bevy_input_focus/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "bevy_input_focus" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "Keyboard focus management" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy"] -rust-version = "1.83.0" +rust-version = "1.85.0" [features] default = ["std", "bevy_reflect", "bevy_ecs/async_executor"] @@ -55,15 +55,6 @@ critical-section = [ "bevy_input/critical-section", ] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "bevy_app/portable-atomic", - "bevy_ecs/portable-atomic", - "bevy_reflect?/portable-atomic", - "bevy_input/portable-atomic", -] - ## Uses the `libm` maths library instead of the one provided in `std` and `core`. libm = ["bevy_math/libm", "bevy_window/libm"] diff --git a/crates/bevy_input_focus/src/autofocus.rs b/crates/bevy_input_focus/src/autofocus.rs index dc3b0919d2..72024418d2 100644 --- a/crates/bevy_input_focus/src/autofocus.rs +++ b/crates/bevy_input_focus/src/autofocus.rs @@ -18,7 +18,7 @@ use bevy_reflect::{prelude::*, Reflect}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Component) + reflect(Debug, Default, Component, Clone) )] #[component(on_add = on_auto_focus_added)] pub struct AutoFocus; diff --git a/crates/bevy_input_focus/src/directional_navigation.rs b/crates/bevy_input_focus/src/directional_navigation.rs index d42b7653f6..2f3d647025 100644 --- a/crates/bevy_input_focus/src/directional_navigation.rs +++ b/crates/bevy_input_focus/src/directional_navigation.rs @@ -17,7 +17,7 @@ use bevy_app::prelude::*; use bevy_ecs::{ - entity::{hash_map::EntityHashMap, hash_set::EntityHashSet}, + entity::{EntityHashMap, EntityHashSet}, prelude::*, system::SystemParam, }; @@ -48,7 +48,7 @@ impl Plugin for DirectionalNavigationPlugin { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Default, Debug, PartialEq) + reflect(Default, Debug, PartialEq, Clone) )] pub struct NavNeighbors { /// The array of neighbors, one for each [`CompassOctant`]. @@ -94,7 +94,7 @@ impl NavNeighbors { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Resource, Debug, Default, PartialEq) + reflect(Resource, Debug, Default, PartialEq, Clone) )] pub struct DirectionalNavigationMap { /// A directed graph of focusable entities. 
diff --git a/crates/bevy_input_focus/src/lib.rs b/crates/bevy_input_focus/src/lib.rs index 0ec0b42897..3f7ecf9e7c 100644 --- a/crates/bevy_input_focus/src/lib.rs +++ b/crates/bevy_input_focus/src/lib.rs @@ -80,7 +80,7 @@ use bevy_reflect::{prelude::*, Reflect}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Resource) + reflect(Debug, Default, Resource, Clone) )] pub struct InputFocus(pub Option<Entity>); @@ -123,7 +123,11 @@ impl InputFocus { /// /// By default, this resource is set to `false`. #[derive(Clone, Debug, Resource, Default)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Resource))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Resource, Clone) +)] pub struct InputFocusVisible(pub bool); /// A bubble-able user input event that starts at the currently focused entity. @@ -134,7 +138,7 @@ pub struct InputFocusVisible(pub bool); /// To set up your own bubbling input event, add the [`dispatch_focused_input::`](dispatch_focused_input) system to your app, /// in the [`InputFocusSet::Dispatch`] system set during [`PreUpdate`]. #[derive(Clone, Debug, Component)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component, Clone))] pub struct FocusedInput<E: Event + Clone> { /// The underlying input event. pub input: E, @@ -151,17 +155,17 @@ impl<E: Event + Clone> Event for FocusedInput<E> { #[derive(QueryData)] /// These are for accessing components defined on the targeted entity pub struct WindowTraversal { - parent: Option<&'static ChildOf>, + child_of: Option<&'static ChildOf>, window: Option<&'static Window>, } impl<E: Event + Clone> Traversal<FocusedInput<E>> for WindowTraversal { fn traverse(item: Self::Item<'_>, event: &FocusedInput<E>) -> Option<Entity> { - let WindowTraversalItem { parent, window } = item; + let WindowTraversalItem { child_of, window } = item; // Send event to parent, if it has one. - if let Some(parent) = parent { - return Some(parent.get()); + if let Some(child_of) = child_of { + return Some(child_of.parent()); }; // Otherwise, send it to the window entity (unless this is a window entity). @@ -226,7 +230,7 @@ pub fn dispatch_focused_input<E: Event + Clone>( windows: Query<Entity, With<PrimaryWindow>>, mut commands: Commands, ) { - if let Ok(window) = windows.get_single() { + if let Ok(window) = windows.single() { // If an element has keyboard focus, then dispatch the input event to that element. if let Some(focused_entity) = focus.0 { for ev in key_events.read() { @@ -334,7 +338,7 @@ impl IsFocused for World { if e == entity { return true; } - if let Some(parent) = self.entity(e).get::<ChildOf>().map(ChildOf::get) { + if let Some(parent) = self.entity(e).get::<ChildOf>().map(ChildOf::parent) { e = parent; } else { return false; diff --git a/crates/bevy_input_focus/src/tab_navigation.rs b/crates/bevy_input_focus/src/tab_navigation.rs index 683e5d12d5..a5fe691458 100644 --- a/crates/bevy_input_focus/src/tab_navigation.rs +++ b/crates/bevy_input_focus/src/tab_navigation.rs @@ -58,7 +58,7 @@ use { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Component, PartialEq) + reflect(Debug, Default, Component, PartialEq, Clone) )] pub struct TabIndex(pub i32); @@ -67,7 +67,7 @@ pub struct TabIndex(pub i32); #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Component) + reflect(Debug, Default, Component, Clone) )] pub struct TabGroup { /// The order of the tab group relative to other tab groups.
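// Illustrative sketch (not part of the patch): two renames used in the hunks above —
// `ChildOf::get` becomes `ChildOf::parent`, and `Query::get_single()` becomes
// `Query::single()` (still returning a `Result`). Hypothetical helper:
use bevy_ecs::prelude::{ChildOf, Entity, Query};

fn parent_of(entity: Entity, parents: Query<&ChildOf>) -> Option<Entity> {
    // `ChildOf::parent` returns the parent entity recorded by the relationship.
    parents.get(entity).ok().map(ChildOf::parent)
}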
diff --git a/crates/bevy_internal/Cargo.toml b/crates/bevy_internal/Cargo.toml index 4f24eeaaea..dc376adfc6 100644 --- a/crates/bevy_internal/Cargo.toml +++ b/crates/bevy_internal/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_internal" version = "0.16.0-dev" -edition = "2021" +edition = "2024" description = "An internal Bevy crate used to facilitate optional dynamic linking via the 'dynamic_linking' feature" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -14,6 +14,7 @@ trace = [ "bevy_app/trace", "bevy_asset?/trace", "bevy_core_pipeline?/trace", + "bevy_anti_aliasing?/trace", "bevy_ecs/trace", "bevy_log/trace", "bevy_pbr?/trace", @@ -29,12 +30,6 @@ sysinfo_plugin = ["bevy_diagnostic/sysinfo_plugin"] # Texture formats that have specific rendering support (HDR enabled by default) basis-universal = ["bevy_image/basis-universal", "bevy_render/basis-universal"] -dds = [ - "bevy_image/dds", - "bevy_render/dds", - "bevy_core_pipeline/dds", - "bevy_gltf/dds", -] exr = ["bevy_image/exr", "bevy_render/exr"] hdr = ["bevy_image/hdr", "bevy_render/hdr"] ktx2 = ["bevy_image/ktx2", "bevy_render/ktx2"] @@ -55,6 +50,7 @@ qoi = ["bevy_image/qoi"] tga = ["bevy_image/tga"] tiff = ["bevy_image/tiff"] webp = ["bevy_image/webp"] +dds = ["bevy_image/dds"] # Enable SPIR-V passthrough spirv_shader_passthrough = ["bevy_render/spirv_shader_passthrough"] @@ -67,7 +63,7 @@ statically-linked-dxc = ["bevy_render/statically-linked-dxc"] tonemapping_luts = ["bevy_core_pipeline/tonemapping_luts"] # Include SMAA LUT KTX2 Files -smaa_luts = ["bevy_core_pipeline/smaa_luts"] +smaa_luts = ["bevy_anti_aliasing/smaa_luts"] # Audio format support (vorbis is enabled by default) flac = ["bevy_audio/flac"] @@ -88,8 +84,10 @@ shader_format_glsl = [ "bevy_pbr?/shader_format_glsl", ] shader_format_spirv = ["bevy_render/shader_format_spirv"] +shader_format_wesl = ["bevy_render/shader_format_wesl"] serialize = [ + "bevy_a11y?/serialize", "bevy_color?/serialize", "bevy_ecs/serialize", "bevy_image?/serialize", @@ -101,15 +99,16 @@ serialize = [ "bevy_ui?/serialize", "bevy_window?/serialize", "bevy_winit?/serialize", - "bevy_platform_support/serialize", + "bevy_platform/serialize", ] multi_threaded = [ + "std", "bevy_asset?/multi_threaded", "bevy_ecs/multi_threaded", "bevy_render?/multi_threaded", "bevy_tasks/multi_threaded", ] -async-io = ["bevy_tasks/async-io"] +async-io = ["std", "bevy_tasks/async-io"] # Display server protocol support (X11 is enabled by default) wayland = ["bevy_winit/wayland"] @@ -149,6 +148,7 @@ pbr_specular_textures = [ # Optimise for WebGL2 webgl = [ "bevy_core_pipeline?/webgl", + "bevy_anti_aliasing?/webgl", "bevy_pbr?/webgl", "bevy_render?/webgl", "bevy_gizmos?/webgl", @@ -157,6 +157,7 @@ webgl = [ webgpu = [ "bevy_core_pipeline?/webgpu", + "bevy_anti_aliasing?/webgpu", "bevy_pbr?/webgpu", "bevy_render?/webgpu", "bevy_gizmos?/webgpu", @@ -173,6 +174,7 @@ bevy_sprite = ["dep:bevy_sprite", "bevy_gizmos?/bevy_sprite", "bevy_image"] bevy_pbr = ["dep:bevy_pbr", "bevy_gizmos?/bevy_pbr", "bevy_image"] bevy_window = ["dep:bevy_window", "dep:bevy_a11y"] bevy_core_pipeline = ["dep:bevy_core_pipeline", "bevy_image"] +bevy_anti_aliasing = ["dep:bevy_anti_aliasing", "bevy_image"] bevy_gizmos = ["dep:bevy_gizmos", "bevy_image"] bevy_gltf = ["dep:bevy_gltf", "bevy_image"] bevy_ui = ["dep:bevy_ui", "bevy_image"] @@ -195,6 +197,8 @@ bevy_render = [ "bevy_scene?/bevy_render", "bevy_gizmos?/bevy_render", "bevy_image", + "bevy_color/wgpu-types", + "bevy_color/encase", ] # Enable assertions 
to check the validity of parameters passed to glam @@ -259,9 +263,6 @@ bevy_ui_picking_backend = ["bevy_picking", "bevy_ui/bevy_ui_picking_backend"] # Provides a UI debug overlay bevy_ui_debug = ["bevy_ui?/bevy_ui_debug"] -# Enable support for the ios_simulator by downgrading some rendering capabilities -ios_simulator = ["bevy_pbr?/ios_simulator", "bevy_render?/ios_simulator"] - # Enable built in global state machines bevy_state = ["dep:bevy_state"] @@ -275,56 +276,161 @@ reflect_functions = [ "bevy_ecs/reflect_functions", ] +# Enable documentation reflection +reflect_documentation = ["bevy_reflect/documentation"] + # Enable winit custom cursor support custom_cursor = ["bevy_winit/custom_cursor"] # Experimental support for nodes that are ignored for UI layouting ghost_nodes = ["bevy_ui/ghost_nodes"] +# Use the configurable global error handler as the default error handler. +configurable_error_handler = ["bevy_ecs/configurable_error_handler"] + +# Allows access to the `std` crate. Enabling this feature will prevent compilation +# on `no_std` targets, but provides access to certain additional features on +# supported platforms. +std = [ + "bevy_a11y?/std", + "bevy_app/std", + "bevy_color?/std", + "bevy_diagnostic/std", + "bevy_ecs/std", + "bevy_input/std", + "bevy_input_focus?/std", + "bevy_math/std", + "bevy_platform/std", + "bevy_reflect/std", + "bevy_state?/std", + "bevy_time/std", + "bevy_transform/std", + "bevy_utils/std", + "bevy_tasks/std", + "bevy_window?/std", +] + +# `critical-section` provides the building blocks for synchronization primitives +# on all platforms, including `no_std`. +critical-section = [ + "bevy_a11y?/critical-section", + "bevy_app/critical-section", + "bevy_diagnostic/critical-section", + "bevy_ecs/critical-section", + "bevy_input/critical-section", + "bevy_input_focus?/critical-section", + "bevy_platform/critical-section", + "bevy_reflect/critical-section", + "bevy_state?/critical-section", + "bevy_time/critical-section", + "bevy_utils/critical-section", + "bevy_tasks/critical-section", +] + +# Uses the `libm` maths library instead of the one provided in `std` and `core`. +libm = [ + "bevy_color?/libm", + "bevy_input/libm", + "bevy_input_focus?/libm", + "bevy_math/libm", + "bevy_transform/libm", + "bevy_window?/libm", +] + +# Uses `async-executor` as a task execution backend. +# This backend is incompatible with `no_std` targets. +async_executor = [ + "std", + "bevy_tasks/async_executor", + "bevy_ecs/async_executor", + "bevy_transform/async_executor", +] + +# Enables use of browser APIs. +# Note this is currently only applicable on `wasm32` architectures. 
+web = [ + "bevy_app/web", + "bevy_platform/web", + "bevy_reflect/web", + "bevy_tasks/web", +] + [dependencies] -# bevy -bevy_a11y = { path = "../bevy_a11y", version = "0.16.0-dev", optional = true } -bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } -bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } -bevy_state = { path = "../bevy_state", optional = true, version = "0.16.0-dev" } -bevy_input = { path = "../bevy_input", version = "0.16.0-dev" } -bevy_input_focus = { path = "../bevy_input_focus", version = "0.16.0-dev" } -bevy_log = { path = "../bevy_log", version = "0.16.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.16.0-dev", features = [ +# bevy (no_std) +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ "bevy_reflect", ] } -bevy_platform_support = { path = "../bevy_platform_support", version = "0.16.0-dev" } -bevy_ptr = { path = "../bevy_ptr", version = "0.16.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ - "bevy", +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev", default-features = false } +bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev", default-features = false } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", ] } -bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.16.0-dev", optional = true } -bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } +bevy_input = { path = "../bevy_input", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", + "nostd-libm", +] } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "alloc", +] } +bevy_ptr = { path = "../bevy_ptr", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "smallvec", +] } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev", default-features = false, features = [ + "bevy-support", + "bevy_reflect", +] } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false, features = [ + "alloc", +] } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false } + +# bevy (std required) +bevy_log = { path = "../bevy_log", version = "0.16.0-dev", optional = true } + # bevy (optional) +bevy_a11y = { path = "../bevy_a11y", optional = true, version = "0.16.0-dev", features = [ + "bevy_reflect", +] } bevy_animation = { path = "../bevy_animation", optional = true, version = "0.16.0-dev" } bevy_asset = { path = "../bevy_asset", optional = true, version = "0.16.0-dev" } bevy_audio = { path = "../bevy_audio", optional = true, version = "0.16.0-dev" } -bevy_color = { path = "../bevy_color", optional = true, version = "0.16.0-dev" } +bevy_color = { path = 
"../bevy_color", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "alloc", + "bevy_reflect", +] } bevy_core_pipeline = { path = "../bevy_core_pipeline", optional = true, version = "0.16.0-dev" } +bevy_anti_aliasing = { path = "../bevy_anti_aliasing", optional = true, version = "0.16.0-dev" } bevy_dev_tools = { path = "../bevy_dev_tools", optional = true, version = "0.16.0-dev" } bevy_gilrs = { path = "../bevy_gilrs", optional = true, version = "0.16.0-dev" } bevy_gizmos = { path = "../bevy_gizmos", optional = true, version = "0.16.0-dev", default-features = false } bevy_gltf = { path = "../bevy_gltf", optional = true, version = "0.16.0-dev" } bevy_image = { path = "../bevy_image", optional = true, version = "0.16.0-dev" } +bevy_input_focus = { path = "../bevy_input_focus", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] } bevy_pbr = { path = "../bevy_pbr", optional = true, version = "0.16.0-dev" } bevy_picking = { path = "../bevy_picking", optional = true, version = "0.16.0-dev" } bevy_remote = { path = "../bevy_remote", optional = true, version = "0.16.0-dev" } bevy_render = { path = "../bevy_render", optional = true, version = "0.16.0-dev" } bevy_scene = { path = "../bevy_scene", optional = true, version = "0.16.0-dev" } bevy_sprite = { path = "../bevy_sprite", optional = true, version = "0.16.0-dev" } +bevy_state = { path = "../bevy_state", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "bevy_app", + "bevy_reflect", +] } bevy_text = { path = "../bevy_text", optional = true, version = "0.16.0-dev" } bevy_ui = { path = "../bevy_ui", optional = true, version = "0.16.0-dev" } -bevy_winit = { path = "../bevy_winit", optional = true, version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] } +bevy_winit = { path = "../bevy_winit", optional = true, version = "0.16.0-dev", default-features = false } [lints] workspace = true diff --git a/crates/bevy_internal/src/default_plugins.rs b/crates/bevy_internal/src/default_plugins.rs index d9aee3017a..db1152a362 100644 --- a/crates/bevy_internal/src/default_plugins.rs +++ b/crates/bevy_internal/src/default_plugins.rs @@ -4,6 +4,7 @@ plugin_group! { /// This plugin group will add all the default plugins for a *Bevy* application: pub struct DefaultPlugins { bevy_app:::PanicHandlerPlugin, + #[cfg(feature = "bevy_log")] bevy_log:::LogPlugin, bevy_app:::TaskPoolPlugin, bevy_diagnostic:::FrameCountPlugin, @@ -17,6 +18,7 @@ plugin_group! { bevy_window:::WindowPlugin, #[cfg(feature = "bevy_window")] bevy_a11y:::AccessibilityPlugin, + #[cfg(feature = "std")] #[custom(cfg(any(unix, windows)))] bevy_app:::TerminalCtrlCHandlerPlugin, #[cfg(feature = "bevy_asset")] @@ -36,6 +38,8 @@ plugin_group! 
         bevy_render::pipelined_rendering:::PipelinedRenderingPlugin,
         #[cfg(feature = "bevy_core_pipeline")]
         bevy_core_pipeline:::CorePipelinePlugin,
+        #[cfg(feature = "bevy_anti_aliasing")]
+        bevy_anti_aliasing:::AntiAliasingPlugin,
         #[cfg(feature = "bevy_sprite")]
         bevy_sprite:::SpritePlugin,
         #[cfg(feature = "bevy_text")]
diff --git a/crates/bevy_internal/src/lib.rs b/crates/bevy_internal/src/lib.rs
index 578527d68e..07dd936ab1 100644
--- a/crates/bevy_internal/src/lib.rs
+++ b/crates/bevy_internal/src/lib.rs
@@ -4,6 +4,7 @@
     html_logo_url = "https://bevyengine.org/assets/icon.png",
     html_favicon_url = "https://bevyengine.org/assets/icon.png"
 )]
+#![no_std]

 //! This module is separated into its own crate to enable simple dynamic linking for Bevy, and should not be used directly

@@ -17,6 +18,8 @@ pub use default_plugins::*;
 pub use bevy_a11y as a11y;
 #[cfg(feature = "bevy_animation")]
 pub use bevy_animation as animation;
+#[cfg(feature = "bevy_anti_aliasing")]
+pub use bevy_anti_aliasing as anti_aliasing;
 pub use bevy_app as app;
 #[cfg(feature = "bevy_asset")]
 pub use bevy_asset as asset;
@@ -39,14 +42,16 @@ pub use bevy_gltf as gltf;
 #[cfg(feature = "bevy_image")]
 pub use bevy_image as image;
 pub use bevy_input as input;
+#[cfg(feature = "bevy_input_focus")]
 pub use bevy_input_focus as input_focus;
+#[cfg(feature = "bevy_log")]
 pub use bevy_log as log;
 pub use bevy_math as math;
 #[cfg(feature = "bevy_pbr")]
 pub use bevy_pbr as pbr;
 #[cfg(feature = "bevy_picking")]
 pub use bevy_picking as picking;
-pub use bevy_platform_support as platform_support;
+pub use bevy_platform as platform;
 pub use bevy_ptr as ptr;
 pub use bevy_reflect as reflect;
 #[cfg(feature = "bevy_remote")]
diff --git a/crates/bevy_internal/src/prelude.rs b/crates/bevy_internal/src/prelude.rs
index 1c19c7ccc1..26d5c7e2af 100644
--- a/crates/bevy_internal/src/prelude.rs
+++ b/crates/bevy_internal/src/prelude.rs
@@ -1,10 +1,14 @@
 #[doc(hidden)]
 pub use crate::{
-    app::prelude::*, ecs::prelude::*, input::prelude::*, log::prelude::*, math::prelude::*,
+    app::prelude::*, ecs::prelude::*, input::prelude::*, math::prelude::*, platform::prelude::*,
     reflect::prelude::*, time::prelude::*, transform::prelude::*, utils::prelude::*,
     DefaultPlugins, MinimalPlugins,
 };

+#[doc(hidden)]
+#[cfg(feature = "bevy_log")]
+pub use crate::log::prelude::*;
+
 #[doc(hidden)]
 #[cfg(feature = "bevy_window")]
 pub use crate::window::prelude::*;
diff --git a/crates/bevy_log/Cargo.toml b/crates/bevy_log/Cargo.toml
index 9a982b4209..cc7c53e676 100644
--- a/crates/bevy_log/Cargo.toml
+++ b/crates/bevy_log/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "bevy_log"
 version = "0.16.0-dev"
-edition = "2021"
+edition = "2024"
 description = "Provides logging for Bevy Engine"
 homepage = "https://bevyengine.org"
 repository = "https://github.com/bevyengine/bevy"
@@ -30,14 +30,18 @@ tracing = { version = "0.1", default-features = false, features = ["std"] }

 # Tracy dependency compatibility table:
 # https://github.com/nagisa/rust_tracy_client
-tracing-tracy = { version = "0.11.0", optional = true }
-tracy-client = { version = "0.17.0", optional = true }
+tracing-tracy = { version = "0.11.4", optional = true }
+tracy-client = { version = "0.18.0", optional = true }

 [target.'cfg(target_os = "android")'.dependencies]
 android_log-sys = "0.3.0"

 [target.'cfg(target_arch = "wasm32")'.dependencies]
 tracing-wasm = "0.2.1"
+# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption.
+bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [
+  "web",
+] }

 [target.'cfg(target_os = "ios")'.dependencies]
 tracing-oslog = "0.2"
diff --git a/crates/bevy_log/src/lib.rs b/crates/bevy_log/src/lib.rs
index f0743f022d..055395bad7 100644
--- a/crates/bevy_log/src/lib.rs
+++ b/crates/bevy_log/src/lib.rs
@@ -80,11 +80,11 @@ pub(crate) struct FlushGuard(SyncCell);
 /// Adds logging to Apps. This plugin is part of the `DefaultPlugins`. Adding
 /// this plugin will setup a collector appropriate to your target platform:
 /// * Using [`tracing-subscriber`](https://crates.io/crates/tracing-subscriber) by default,
-/// logging to `stdout`.
+///   logging to `stdout`.
 /// * Using [`android_log-sys`](https://crates.io/crates/android_log-sys) on Android,
-/// logging to Android logs.
+///   logging to Android logs.
 /// * Using [`tracing-wasm`](https://crates.io/crates/tracing-wasm) in Wasm, logging
-/// to the browser console.
+///   to the browser console.
 ///
 /// You can configure this plugin.
 /// ```no_run
@@ -117,7 +117,10 @@ pub(crate) struct FlushGuard(SyncCell);
 /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup};
 /// # use bevy_log::LogPlugin;
 /// fn main() {
+/// # // SAFETY: Single-threaded
+/// # unsafe {
 ///     std::env::set_var("NO_COLOR", "1");
+/// # }
 ///     App::new()
 ///         .add_plugins(DefaultPlugins)
 ///         .run();
@@ -256,6 +259,7 @@ impl Default for LogPlugin {
 }

 impl Plugin for LogPlugin {
+    #[expect(clippy::print_stderr, reason = "Allowed during logger setup")]
     fn build(&self, app: &mut App) {
         #[cfg(feature = "trace")]
         {
diff --git a/crates/bevy_macro_utils/Cargo.toml b/crates/bevy_macro_utils/Cargo.toml
index 28ee17c871..36be752349 100644
--- a/crates/bevy_macro_utils/Cargo.toml
+++ b/crates/bevy_macro_utils/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "bevy_macro_utils"
 version = "0.16.0-dev"
-edition = "2021"
+edition = "2024"
 description = "A collection of utils for Bevy Engine"
 homepage = "https://bevyengine.org"
 repository = "https://github.com/bevyengine/bevy"
@@ -12,7 +12,10 @@ keywords = ["bevy"]
 syn = "2.0"
 quote = "1.0"
 proc-macro2 = "1.0"
-cargo-manifest-proc-macros = "0.3.3"
+toml_edit = { version = "0.22.7", default-features = false, features = [
+  "parse",
+] }
+parking_lot = { version = "0.12" }

 [lints]
 workspace = true
diff --git a/crates/bevy_macro_utils/src/bevy_manifest.rs b/crates/bevy_macro_utils/src/bevy_manifest.rs
index 5a492f8a81..b0d321ba22 100644
--- a/crates/bevy_macro_utils/src/bevy_manifest.rs
+++ b/crates/bevy_macro_utils/src/bevy_manifest.rs
@@ -1,55 +1,112 @@
 extern crate proc_macro;

-use std::sync::MutexGuard;
-
-use cargo_manifest_proc_macros::{
-    CargoManifest, CrateReExportingPolicy, KnownReExportingCrate, PathPiece,
-    TryResolveCratePathError,
-};
+use alloc::collections::BTreeMap;
+use parking_lot::{lock_api::RwLockReadGuard, MappedRwLockReadGuard, RwLock, RwLockWriteGuard};
 use proc_macro::TokenStream;
+use std::{
+    env,
+    path::{Path, PathBuf},
+    time::SystemTime,
+};
+use toml_edit::{ImDocument, Item};

-struct BevyReExportingPolicy;
-
-impl CrateReExportingPolicy for BevyReExportingPolicy {
-    fn get_re_exported_crate_path(&self, crate_name: &str) -> Option<PathPiece> {
-        crate_name.strip_prefix("bevy_").map(|s| {
-            let mut path = PathPiece::new();
-            path.push(syn::parse_str::<syn::PathSegment>(s).unwrap());
-            path
-        })
-    }
+/// The path to the `Cargo.toml` file for the Bevy project.
+#[derive(Debug)]
+pub struct BevyManifest {
+    manifest: ImDocument<Box<str>>,
+    modified_time: SystemTime,
 }

 const BEVY: &str = "bevy";

-const KNOWN_RE_EXPORTING_CRATE_BEVY: KnownReExportingCrate<'static> = KnownReExportingCrate {
-    re_exporting_crate_package_name: BEVY,
-    crate_re_exporting_policy: &BevyReExportingPolicy {},
-};
-
-const ALL_KNOWN_RE_EXPORTING_CRATES: &[&KnownReExportingCrate<'static>] = &[&KNOWN_RE_EXPORTING_CRATE_BEVY];
-
-/// The path to the `Cargo.toml` file for the Bevy project.
-pub struct BevyManifest(MutexGuard<'static, CargoManifest>);
-
 impl BevyManifest {
     /// Returns a global shared instance of the [`BevyManifest`] struct.
-    pub fn shared() -> Self {
-        Self(CargoManifest::shared())
+    pub fn shared() -> MappedRwLockReadGuard<'static, BevyManifest> {
+        static MANIFESTS: RwLock<BTreeMap<PathBuf, BevyManifest>> = RwLock::new(BTreeMap::new());
+        let manifest_path = Self::get_manifest_path();
+        let modified_time = Self::get_manifest_modified_time(&manifest_path)
+            .expect("The Cargo.toml should have a modified time");
+
+        if let Ok(manifest) =
+            RwLockReadGuard::try_map(MANIFESTS.read(), |manifests| manifests.get(&manifest_path))
+        {
+            if manifest.modified_time == modified_time {
+                return manifest;
+            }
+        }
+
+        let manifest = BevyManifest {
+            manifest: Self::read_manifest(&manifest_path),
+            modified_time,
+        };
+
+        let key = manifest_path.clone();
+        let mut manifests = MANIFESTS.write();
+        manifests.insert(key, manifest);
+
+        RwLockReadGuard::map(RwLockWriteGuard::downgrade(manifests), |manifests| {
+            manifests.get(&manifest_path).unwrap()
+        })
+    }
+
+    fn get_manifest_path() -> PathBuf {
+        env::var_os("CARGO_MANIFEST_DIR")
+            .map(|path| {
+                let mut path = PathBuf::from(path);
+                path.push("Cargo.toml");
+                assert!(
+                    path.exists(),
+                    "Cargo manifest does not exist at path {}",
+                    path.display()
+                );
+                path
+            })
+            .expect("CARGO_MANIFEST_DIR is not defined.")
+    }
+
+    fn get_manifest_modified_time(
+        cargo_manifest_path: &Path,
+    ) -> Result<SystemTime, std::io::Error> {
+        std::fs::metadata(cargo_manifest_path).and_then(|metadata| metadata.modified())
+    }
+
+    fn read_manifest(path: &Path) -> ImDocument<Box<str>> {
+        let manifest = std::fs::read_to_string(path)
+            .unwrap_or_else(|_| panic!("Unable to read cargo manifest: {}", path.display()))
+            .into_boxed_str();
+        ImDocument::parse(manifest)
+            .unwrap_or_else(|_| panic!("Failed to parse cargo manifest: {}", path.display()))
     }

     /// Attempt to retrieve the [path](syn::Path) of a particular package in
     /// the [manifest](BevyManifest) by [name](str).
-    pub fn maybe_get_path(&self, name: &str) -> Result<syn::Path, TryResolveCratePathError> {
-        self.0
-            .try_resolve_crate_path(name, ALL_KNOWN_RE_EXPORTING_CRATES)
-    }
+    pub fn maybe_get_path(&self, name: &str) -> Option<syn::Path> {
+        let find_in_deps = |deps: &Item| -> Option<syn::Path> {
+            let package = if deps.get(name).is_some() {
+                return Some(Self::parse_str(name));
+            } else if deps.get(BEVY).is_some() {
+                BEVY
+            } else {
+                // Note: to support bevy crate aliases, we could do scanning here to find a crate with a "package" name that
+                // matches our request, but that would then mean we are scanning every dependency (and dev dependency) for every
+                // macro execution that hits this branch (which includes all built-in bevy crates). Our current stance is that supporting
+                // remapped crate names in derive macros is not worth that "compile time" price of admission. As a workaround, people aliasing
+                // bevy crate names can use "use REMAPPED as bevy_X" or "use REMAPPED::x as bevy_x".
+                return None;
+            };

-    /// Returns the path for the crate with the given name.
-    pub fn get_path(&self, name: &str) -> syn::Path {
-        self.maybe_get_path(name)
-            //.expect("Failed to get path for crate")
-            .unwrap_or_else(|_err| Self::parse_str(name))
+            let mut path = Self::parse_str::<syn::Path>(package);
+            if let Some(module) = name.strip_prefix("bevy_") {
+                path.segments.push(Self::parse_str(module));
+            }
+            Some(path)
+        };
+
+        let deps = self.manifest.get("dependencies");
+        let deps_dev = self.manifest.get("dev-dependencies");
+
+        deps.and_then(find_in_deps)
+            .or_else(|| deps_dev.and_then(find_in_deps))
     }

     /// Attempt to parse the provided [path](str) as a [syntax tree node](syn::parse::Parse)
@@ -57,6 +114,12 @@ impl BevyManifest {
         syn::parse(path.parse::<TokenStream>().ok()?).ok()
     }

+    /// Returns the path for the crate with the given name.
+    pub fn get_path(&self, name: &str) -> syn::Path {
+        self.maybe_get_path(name)
+            .unwrap_or_else(|| Self::parse_str(name))
+    }
+
     /// Attempt to parse provided [path](str) as a [syntax tree node](syn::parse::Parse).
     ///
     /// # Panics
@@ -67,18 +130,4 @@ impl BevyManifest {
     pub fn parse_str<T: syn::parse::Parse>(path: &str) -> T {
         Self::try_parse_str(path).unwrap()
     }
-
-    /// Attempt to get a subcrate [path](syn::Path) under Bevy by [name](str)
-    pub fn get_subcrate(&self, subcrate: &str) -> Result<syn::Path, TryResolveCratePathError> {
-        self.maybe_get_path(BEVY)
-            .map(|bevy_path| {
-                let mut segments = bevy_path.segments;
-                segments.push(BevyManifest::parse_str(subcrate));
-                syn::Path {
-                    leading_colon: None,
-                    segments,
-                }
-            })
-            .or_else(|_err| self.maybe_get_path(&format!("bevy_{subcrate}")))
-    }
 }
diff --git a/crates/bevy_macro_utils/src/lib.rs b/crates/bevy_macro_utils/src/lib.rs
index 28de7e2227..aa386101f1 100644
--- a/crates/bevy_macro_utils/src/lib.rs
+++ b/crates/bevy_macro_utils/src/lib.rs
@@ -7,6 +7,7 @@

 //! A collection of helper types and functions for working on macros within the Bevy ecosystem.

+extern crate alloc;
 extern crate proc_macro;

 mod attrs;
diff --git a/crates/bevy_math/Cargo.toml b/crates/bevy_math/Cargo.toml
index 3ef041492a..7aae1ec74b 100644
--- a/crates/bevy_math/Cargo.toml
+++ b/crates/bevy_math/Cargo.toml
@@ -1,22 +1,22 @@
 [package]
 name = "bevy_math"
 version = "0.16.0-dev"
-edition = "2021"
+edition = "2024"
 description = "Provides math functionality for Bevy Engine"
 homepage = "https://bevyengine.org"
 repository = "https://github.com/bevyengine/bevy"
 license = "MIT OR Apache-2.0"
 keywords = ["bevy"]
-rust-version = "1.83.0"
+rust-version = "1.85.0"

 [dependencies]
-glam = { version = "0.29", default-features = false, features = ["bytemuck"] }
+glam = { version = "0.29.3", default-features = false, features = ["bytemuck"] }
 thiserror = { version = "2", default-features = false }
 derive_more = { version = "1", default-features = false, features = [
   "from",
   "into",
 ] }
-itertools = { version = "0.13.0", default-features = false }
+itertools = { version = "0.14.0", default-features = false }
 serde = { version = "1", default-features = false, features = [
   "derive",
 ], optional = true }
@@ -36,10 +36,8 @@ approx = "0.5"
 rand = "0.8"
 rand_chacha = "0.3"
 # Enable the approx feature when testing.
-bevy_math = { path = ".", version = "0.16.0-dev", default-features = false, features = [
-  "approx",
-] }
-glam = { version = "0.29", default-features = false, features = ["approx"] }
+bevy_math = { path = ".", default-features = false, features = ["approx"] }
+glam = { version = "0.29.3", default-features = false, features = ["approx"] }

 [features]
 default = ["std", "rand", "curve"]
@@ -79,6 +77,9 @@ rand = ["dep:rand", "dep:rand_distr", "glam/rand"]
 curve = []
 # Enable bevy_reflect (requires alloc)
 bevy_reflect = ["dep:bevy_reflect", "alloc"]
+# Enable libm mathematical functions as a fallback for no_std environments.
+# Can be overridden with std feature.
+nostd-libm = ["dep:libm", "glam/nostd-libm"]

 [lints]
 workspace = true
diff --git a/crates/bevy_math/clippy.toml b/crates/bevy_math/clippy.toml
index 0fb122e4dc..c1f67e044d 100644
--- a/crates/bevy_math/clippy.toml
+++ b/crates/bevy_math/clippy.toml
@@ -34,5 +34,6 @@ disallowed-methods = [
   { path = "f32::copysign", reason = "use ops::copysign instead for no_std compatibility" },
   { path = "f32::round", reason = "use ops::round instead for no_std compatibility" },
   { path = "f32::floor", reason = "use ops::floor instead for no_std compatibility" },
+  { path = "f32::ceil", reason = "use ops::ceil instead for no_std compatibility" },
   { path = "f32::fract", reason = "use ops::fract instead for no_std compatibility" },
 ]
diff --git a/crates/bevy_math/images/easefunction/BothSteps.svg b/crates/bevy_math/images/easefunction/BothSteps.svg
new file mode 100644
index 0000000000..92090fa5d4
--- /dev/null
+++ b/crates/bevy_math/images/easefunction/BothSteps.svg
@@ -0,0 +1,5 @@
+
+BothSteps(4, Both)
+
+
+
\ No newline at end of file
diff --git a/crates/bevy_math/images/easefunction/EndSteps.svg b/crates/bevy_math/images/easefunction/EndSteps.svg
new file mode 100644
index 0000000000..dafe6825fe
--- /dev/null
+++ b/crates/bevy_math/images/easefunction/EndSteps.svg
@@ -0,0 +1,5 @@
+
+EndSteps(4, End)
+
+
+
\ No newline at end of file
diff --git a/crates/bevy_math/images/easefunction/NoneSteps.svg b/crates/bevy_math/images/easefunction/NoneSteps.svg
new file mode 100644
index 0000000000..8434f4126b
--- /dev/null
+++ b/crates/bevy_math/images/easefunction/NoneSteps.svg
@@ -0,0 +1,5 @@
+
+NoneSteps(4, None)
+
+
+
\ No newline at end of file
diff --git a/crates/bevy_math/images/easefunction/StartSteps.svg b/crates/bevy_math/images/easefunction/StartSteps.svg
new file mode 100644
index 0000000000..476a17d364
--- /dev/null
+++ b/crates/bevy_math/images/easefunction/StartSteps.svg
@@ -0,0 +1,5 @@
+
+StartSteps(4, Start)
+
+
+
\ No newline at end of file
diff --git a/crates/bevy_math/images/easefunction/Steps.svg b/crates/bevy_math/images/easefunction/Steps.svg
deleted file mode 100644
index 3e7dec055b..0000000000
--- a/crates/bevy_math/images/easefunction/Steps.svg
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Steps(4)
-
-
-
\ No newline at end of file
diff --git a/crates/bevy_math/src/aspect_ratio.rs b/crates/bevy_math/src/aspect_ratio.rs
index 0289957164..7b7ae6d3ba 100644
--- a/crates/bevy_math/src/aspect_ratio.rs
+++ b/crates/bevy_math/src/aspect_ratio.rs
@@ -9,7 +9,11 @@ use bevy_reflect::Reflect;

 /// An `AspectRatio` is the ratio of width to height.
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Into)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))]
+#[cfg_attr(
+    feature = "bevy_reflect",
+    derive(Reflect),
+    reflect(Debug, PartialEq, Clone)
+)]
 pub struct AspectRatio(f32);

 impl AspectRatio {
diff --git a/crates/bevy_math/src/bounding/bounded2d/mod.rs b/crates/bevy_math/src/bounding/bounded2d/mod.rs
index c5be831a86..bea18f5808 100644
--- a/crates/bevy_math/src/bounding/bounded2d/mod.rs
+++ b/crates/bevy_math/src/bounding/bounded2d/mod.rs
@@ -9,6 +9,10 @@ use crate::{
 #[cfg(feature = "bevy_reflect")]
 use bevy_reflect::Reflect;
+#[cfg(all(feature = "bevy_reflect", feature = "serialize"))]
+use bevy_reflect::{ReflectDeserialize, ReflectSerialize};
+#[cfg(feature = "serialize")]
+use serde::{Deserialize, Serialize};

 /// Computes the geometric center of the given set of points.
 #[inline(always)]
@@ -32,8 +36,17 @@ pub trait Bounded2d {

 /// A 2D axis-aligned bounding box, or bounding rectangle
 #[doc(alias = "BoundingRectangle")]
-#[derive(Clone, Copy, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(
+    feature = "bevy_reflect",
+    derive(Reflect),
+    reflect(Debug, PartialEq, Clone)
+)]
+#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))]
+#[cfg_attr(
+    all(feature = "serialize", feature = "bevy_reflect"),
+    reflect(Serialize, Deserialize)
+)]
 pub struct Aabb2d {
     /// The minimum, conventionally bottom-left, point of the box
     pub min: Vec2,
@@ -450,8 +463,17 @@ mod aabb2d_tests {
 use crate::primitives::Circle;

 /// A bounding circle
-#[derive(Clone, Copy, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(
+    feature = "bevy_reflect",
+    derive(Reflect),
+    reflect(Debug, PartialEq, Clone)
+)]
+#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))]
+#[cfg_attr(
+    all(feature = "serialize", feature = "bevy_reflect"),
+    reflect(Serialize, Deserialize)
+)]
 pub struct BoundingCircle {
     /// The center of the bounding circle
     pub center: Vec2,
diff --git a/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs b/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs
index e1fe6afd77..f55f40ddc6 100644
--- a/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs
+++ b/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs
@@ -4,8 +4,9 @@ use crate::{
     bounding::BoundingVolume,
     ops,
     primitives::{
-        Annulus, Arc2d, Capsule2d, Circle, CircularSector, CircularSegment, Ellipse, Line2d,
-        Plane2d, Polygon, Polyline2d, Rectangle, RegularPolygon, Rhombus, Segment2d, Triangle2d,
+        Annulus, Arc2d, Capsule2d, Circle, CircularSector, CircularSegment, ConvexPolygon, Ellipse,
+        Line2d, Plane2d, Polygon, Polyline2d, Rectangle, RegularPolygon, Rhombus, Segment2d,
+        Triangle2d,
     },
     Dir2, Isometry2d, Mat2, Rot2, Vec2,
 };
@@ -375,6 +376,16 @@ impl Bounded2d for Polygon {
     }
 }

+impl<const N: usize> Bounded2d for ConvexPolygon<N> {
+    fn aabb_2d(&self, isometry: impl Into<Isometry2d>) -> Aabb2d {
+        Aabb2d::from_point_cloud(isometry, self.vertices().as_slice())
+    }
+
+    fn bounding_circle(&self, isometry: impl Into<Isometry2d>) -> BoundingCircle {
+        BoundingCircle::from_point_cloud(isometry, self.vertices().as_slice())
+    }
+}
+
 #[cfg(feature = "alloc")]
 impl Bounded2d for BoxedPolygon {
     fn aabb_2d(&self, isometry: impl Into<Isometry2d>) -> Aabb2d {
@@ -438,6 +449,7 @@ impl Bounded2d for Capsule2d {
 }

 #[cfg(test)]
+#[expect(clippy::print_stdout, reason = "Allowed in tests.")]
tests.")] mod tests { use core::f32::consts::{FRAC_PI_2, FRAC_PI_3, FRAC_PI_4, FRAC_PI_6, TAU}; use std::println; @@ -881,9 +893,9 @@ mod tests { #[test] fn segment() { + let segment = Segment2d::new(Vec2::new(-1.0, -0.5), Vec2::new(1.0, 0.5)); let translation = Vec2::new(2.0, 1.0); let isometry = Isometry2d::from_translation(translation); - let segment = Segment2d::new(Vec2::new(-1.0, -0.5), Vec2::new(1.0, 0.5)); let aabb = segment.aabb_2d(isometry); assert_eq!(aabb.min, Vec2::new(1.0, 0.5)); diff --git a/crates/bevy_math/src/bounding/bounded3d/extrusion.rs b/crates/bevy_math/src/bounding/bounded3d/extrusion.rs index 47d5d66763..607d0f2746 100644 --- a/crates/bevy_math/src/bounding/bounded3d/extrusion.rs +++ b/crates/bevy_math/src/bounding/bounded3d/extrusion.rs @@ -349,7 +349,7 @@ mod tests { #[test] fn segment() { let extrusion = Extrusion::new( - Segment2d::from_direction_and_length(Dir2::new_unchecked(Vec2::NEG_Y), 3.), + Segment2d::new(Vec2::new(0.0, -1.5), Vec2::new(0.0, 1.5)), 4.0, ); let translation = Vec3::new(3., 4., 5.); diff --git a/crates/bevy_math/src/bounding/bounded3d/mod.rs b/crates/bevy_math/src/bounding/bounded3d/mod.rs index c4f3c979f6..5a95b7711f 100644 --- a/crates/bevy_math/src/bounding/bounded3d/mod.rs +++ b/crates/bevy_math/src/bounding/bounded3d/mod.rs @@ -11,6 +11,11 @@ use crate::{ #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; +#[cfg(all(feature = "bevy_reflect", feature = "serialize"))] +use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; +#[cfg(feature = "serialize")] +use serde::{Deserialize, Serialize}; + pub use extrusion::BoundedExtrusion; /// Computes the geometric center of the given set of points. @@ -36,8 +41,17 @@ pub trait Bounded3d { } /// A 3D axis-aligned bounding box -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), + reflect(Serialize, Deserialize) +)] pub struct Aabb3d { /// The minimum point of the box pub min: Vec3A, @@ -456,8 +470,17 @@ mod aabb3d_tests { use crate::primitives::Sphere; /// A bounding sphere -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), + reflect(Serialize, Deserialize) +)] pub struct BoundingSphere { /// The center of the bounding sphere pub center: Vec3A, diff --git a/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs b/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs index 1f4e2a1666..ebfd0266e8 100644 --- a/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs +++ b/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs @@ -457,9 +457,8 @@ mod tests { #[test] fn segment() { - let translation = Vec3::new(2.0, 1.0, 0.0); - let segment = Segment3d::new(Vec3::new(-1.0, -0.5, 0.0), Vec3::new(1.0, 0.5, 0.0)); + let translation = Vec3::new(2.0, 1.0, 0.0); let aabb = segment.aabb_3d(translation); assert_eq!(aabb.min, Vec3A::new(1.0, 0.5, 0.0)); diff --git a/crates/bevy_math/src/bounding/raycast2d.rs 
index 3b46bcfba6..e1def01936 100644
--- a/crates/bevy_math/src/bounding/raycast2d.rs
+++ b/crates/bevy_math/src/bounding/raycast2d.rs
@@ -9,7 +9,7 @@ use bevy_reflect::Reflect;

 /// A raycast intersection test for 2D bounding volumes
 #[derive(Clone, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
 pub struct RayCast2d {
     /// The ray for the test
     pub ray: Ray2d,
@@ -109,7 +109,7 @@ impl IntersectsVolume for RayCast2d {

 /// An intersection test that casts an [`Aabb2d`] along a ray.
 #[derive(Clone, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
 pub struct AabbCast2d {
     /// The ray along which to cast the bounding volume
     pub ray: RayCast2d,
@@ -147,7 +147,7 @@ impl IntersectsVolume for AabbCast2d {

 /// An intersection test that casts a [`BoundingCircle`] along a ray.
 #[derive(Clone, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
 pub struct BoundingCircleCast {
     /// The ray along which to cast the bounding volume
     pub ray: RayCast2d,
diff --git a/crates/bevy_math/src/bounding/raycast3d.rs b/crates/bevy_math/src/bounding/raycast3d.rs
index bfd5d17a0d..9086837f60 100644
--- a/crates/bevy_math/src/bounding/raycast3d.rs
+++ b/crates/bevy_math/src/bounding/raycast3d.rs
@@ -9,7 +9,7 @@ use bevy_reflect::Reflect;

 /// A raycast intersection test for 3D bounding volumes
 #[derive(Clone, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
 pub struct RayCast3d {
     /// The origin of the ray.
     pub origin: Vec3A,
@@ -106,7 +106,7 @@ impl IntersectsVolume for RayCast3d {

 /// An intersection test that casts an [`Aabb3d`] along a ray.
 #[derive(Clone, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
 pub struct AabbCast3d {
     /// The ray along which to cast the bounding volume
     pub ray: RayCast3d,
@@ -151,7 +151,7 @@ impl IntersectsVolume for AabbCast3d {

 /// An intersection test that casts a [`BoundingSphere`] along a ray.
 #[derive(Clone, Debug)]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
 pub struct BoundingSphereCast {
     /// The ray along which to cast the bounding volume
     pub ray: RayCast3d,
diff --git a/crates/bevy_math/src/common_traits.rs b/crates/bevy_math/src/common_traits.rs
index a9a8ef910a..4e127f4026 100644
--- a/crates/bevy_math/src/common_traits.rs
+++ b/crates/bevy_math/src/common_traits.rs
@@ -80,6 +80,8 @@ impl VectorSpace for f32 {
 ///
 /// [vector spaces]: VectorSpace
 #[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
 pub struct Sum<V, W>(pub V, pub W);

 impl<V, W> Mul<f32> for Sum<V, W>
 where
@@ -424,6 +426,9 @@ pub trait HasTangent {
 }

 /// A value with its derivative.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
 pub struct WithDerivative<T>
 where
     T: HasTangent,
 {
@@ -436,6 +441,9 @@ where
 }

 /// A value together with its first and second derivatives.
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
 pub struct WithTwoDerivatives<T>
 where
     T: HasTangent,
 {
diff --git a/crates/bevy_math/src/compass.rs b/crates/bevy_math/src/compass.rs
index 72dd817146..ea3d74c939 100644
--- a/crates/bevy_math/src/compass.rs
+++ b/crates/bevy_math/src/compass.rs
@@ -20,7 +20,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize};
 /// ```
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))]
+#[cfg_attr(
+    feature = "bevy_reflect",
+    derive(Reflect),
+    reflect(Debug, PartialEq, Hash, Clone)
+)]
 #[cfg_attr(
     all(feature = "serialize", feature = "bevy_reflect"),
     reflect(Deserialize, Serialize)
@@ -89,7 +93,11 @@ impl CompassQuadrant {
 /// ```
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))]
+#[cfg_attr(
+    feature = "bevy_reflect",
+    derive(Reflect),
+    reflect(Debug, PartialEq, Hash, Clone)
+)]
 #[cfg_attr(
     all(feature = "serialize", feature = "bevy_reflect"),
     reflect(Deserialize, Serialize)
diff --git a/crates/bevy_math/src/cubic_splines/mod.rs b/crates/bevy_math/src/cubic_splines/mod.rs
index 32e13f6720..6f60de774a 100644
--- a/crates/bevy_math/src/cubic_splines/mod.rs
+++ b/crates/bevy_math/src/cubic_splines/mod.rs
@@ -15,7 +15,7 @@ use {alloc::vec, alloc::vec::Vec, core::iter::once, itertools::Itertools};
 /// A spline composed of a single cubic Bezier curve.
 ///
 /// Useful for user-drawn curves with local control, or animation easing. See
-/// [`CubicSegment::new_bezier`] for use in easing.
+/// [`CubicSegment::new_bezier_easing`] for use in easing.
 ///
 /// ### Interpolation
 ///
@@ -51,7 +51,7 @@ use {alloc::vec, alloc::vec::Vec, core::iter::once, itertools::Itertools};
 /// ```
 #[derive(Clone, Debug)]
 #[cfg(feature = "alloc")]
-#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))]
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
 pub struct CubicBezier<P: VectorSpace> {
     /// The control points of the Bezier curve.
     pub control_points: Vec<[P; 4]>,
@@ -73,20 +73,10 @@ impl CubicGenerator