Fix doc_markdown lints in bevy_tasks (#3481)

#3457 adds the `doc_markdown` clippy lint, which checks doc comments to make sure code identifiers are escaped with backticks. This causes a lot of lint errors, so this is one of a number of PRs that will fix those errors one crate at a time.

This PR fixes lints in the `bevy_tasks` crate.
Author: Michael Dorst
Date:   2021-12-29 17:38:13 +00:00
Parent: b1e4984c55
Commit: b32770d303

5 changed files with 18 additions and 18 deletions
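
For context, the lint fires whenever a doc comment mentions a code identifier without backticks. A minimal sketch of the failure mode and the fix (the `spawn_*` functions here are invented for illustration):

```rust
// Before: clippy's doc_markdown lint flags `TaskPool` because a code
// identifier appears in prose without backticks.
/// Spawns a future onto the TaskPool.
pub fn spawn_before() {}

// After: the identifier is backticked; the [`...`] form additionally
// becomes an intra-doc link when the item is in scope.
/// Spawns a future onto the [`TaskPool`].
pub fn spawn_after() {}
```

Most of the changes below use the linked `[`...`]` form, which is why the diffs read as link insertions rather than plain backticks.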

crates/bevy_tasks/src/countdown_event.rs

```diff
@@ -21,7 +21,7 @@ pub struct CountdownEvent {
 }
 
 impl CountdownEvent {
-    /// Creates a CountdownEvent that must be decremented `n` times for listeners to be
+    /// Creates a [`CountdownEvent`] that must be decremented `n` times for listeners to be
     /// signalled
     pub fn new(n: isize) -> Self {
         let inner = CountdownEventInner {
```
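
A usage sketch of the type documented above, assuming the 0.6-era API (`new`, `decrement`, and an awaitable `listen`):

```rust
use bevy_tasks::CountdownEvent;
use futures_lite::future;

fn main() {
    // Listeners are signalled only once the event has been decremented n (= 2) times.
    let event = CountdownEvent::new(2);
    let listener = event.listen();
    event.decrement();
    event.decrement();
    // Both decrements have happened, so the listener future resolves immediately.
    future::block_on(listener);
}
```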

crates/bevy_tasks/src/iter/mod.rs

```diff
@@ -3,14 +3,14 @@ use crate::TaskPool;
 mod adapters;
 pub use adapters::*;
 
-/// ParallelIterator closely emulates the std::iter::Iterator
-/// interface. However, it uses bevy_task to compute batches in parallel.
+/// [`ParallelIterator`] closely emulates the `std::iter::Iterator`
+/// interface. However, it uses `bevy_task` to compute batches in parallel.
 ///
-/// Note that the overhead of ParallelIterator is high relative to some
+/// Note that the overhead of [`ParallelIterator`] is high relative to some
 /// workloads. In particular, if the batch size is too small or task being
-/// run in parallel is inexpensive, *a ParallelIterator could take longer
-/// than a normal Iterator*. Therefore, you should profile your code before
-/// using ParallelIterator.
+/// run in parallel is inexpensive, *a [`ParallelIterator`] could take longer
+/// than a normal [`Iterator`]*. Therefore, you should profile your code before
+/// using [`ParallelIterator`].
 pub trait ParallelIterator<B>
 where
     B: Iterator<Item = Self::Item> + Send,
@@ -21,7 +21,7 @@ where
     /// Returns the next batch of items for processing.
     ///
     /// Each batch is an iterator with items of the same type as the
-    /// ParallelIterator. Returns `None` when there are no batches left.
+    /// [`ParallelIterator`]. Returns `None` when there are no batches left.
     fn next_batch(&mut self) -> Option<B>;
 
     /// Returns the bounds on the remaining number of items in the
```
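
To make the `next_batch` contract concrete, here is a hypothetical implementor; `RangeBatches` and its fields are invented for illustration, and it assumes the trait's provided methods (such as `count`) take the `TaskPool` to run on, as in the 0.6-era API:

```rust
use std::ops::Range;

use bevy_tasks::{ParallelIterator, TaskPool};

/// Hands out `Range<u32>` batches of up to `batch_size` items each.
struct RangeBatches {
    next: u32,
    end: u32,
    batch_size: u32,
}

impl ParallelIterator<Range<u32>> for RangeBatches {
    type Item = u32;

    // Each batch is itself an iterator over `Self::Item`;
    // returning `None` signals that no batches remain.
    fn next_batch(&mut self) -> Option<Range<u32>> {
        if self.next >= self.end {
            return None;
        }
        let start = self.next;
        self.next = (self.next + self.batch_size).min(self.end);
        Some(start..self.next)
    }
}

fn main() {
    let pool = TaskPool::new();
    let iter = RangeBatches { next: 0, end: 100, batch_size: 10 };
    // The ten batches are consumed as parallel tasks on the pool.
    assert_eq!(iter.count(&pool), 100);
}
```

This also illustrates the overhead warning above: batches of ten trivial items would be slower than a plain `(0..100).count()`, so profiling is the right call before reaching for the parallel version.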

crates/bevy_tasks/src/task.rs

```diff
@@ -12,12 +12,12 @@ use std::{
 /// more gracefully and wait until it stops running, use the [`cancel()`][Task::cancel()] method.
 ///
 /// Tasks that panic get immediately canceled. Awaiting a canceled task also causes a panic.
-/// Wraps async_executor::Task
+/// Wraps `async_executor::Task`
 #[derive(Debug)]
 pub struct Task<T>(async_executor::Task<T>);
 
 impl<T> Task<T> {
-    /// Creates a new task from a given async_executor::Task
+    /// Creates a new task from a given `async_executor::Task`
     pub fn new(task: async_executor::Task<T>) -> Self {
         Self(task)
     }
```
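
A minimal sketch of the wrapper in use: `TaskPool::spawn` returns this `Task<T>`, which is itself a future yielding the spawned future's output:

```rust
use bevy_tasks::TaskPool;
use futures_lite::future;

fn main() {
    let pool = TaskPool::new();
    // `spawn` hands back a `Task<i32>` wrapping the executor's task.
    let task = pool.spawn(async { 1 + 1 });
    // Awaiting (here, blocking on) the task yields the output. Dropping it
    // instead would cancel it, per the doc comment above.
    assert_eq!(future::block_on(task), 2);
}
```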

crates/bevy_tasks/src/task_pool.rs

```diff
@@ -10,7 +10,7 @@ use futures_lite::{future, pin};
 
 use crate::Task;
 
-/// Used to create a TaskPool
+/// Used to create a [`TaskPool`]
 #[derive(Debug, Default, Clone)]
 pub struct TaskPoolBuilder {
     /// If set, we'll set up the thread pool to use at most n threads. Otherwise use
@@ -24,7 +24,7 @@ pub struct TaskPoolBuilder {
 }
 
 impl TaskPoolBuilder {
-    /// Creates a new TaskPoolBuilder instance
+    /// Creates a new [`TaskPoolBuilder`] instance
     pub fn new() -> Self {
         Self::default()
     }
@@ -43,13 +43,13 @@ impl TaskPoolBuilder {
     }
 
     /// Override the name of the threads created for the pool. If set, threads will
-    /// be named <thread_name> (<thread_index>), i.e. "MyThreadPool (2)"
+    /// be named `<thread_name> (<thread_index>)`, i.e. `MyThreadPool (2)`
     pub fn thread_name(mut self, thread_name: String) -> Self {
         self.thread_name = Some(thread_name);
         self
     }
 
-    /// Creates a new ThreadPoolBuilder based on the current options.
+    /// Creates a new [`TaskPool`] based on the current options.
     pub fn build(self) -> TaskPool {
         TaskPool::new_internal(
             self.num_threads,
```
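
A usage sketch of the builder documented above (the thread count and name are arbitrary, and `num_threads` is assumed to be the builder's cap on pool size, as in 0.6):

```rust
use bevy_tasks::TaskPoolBuilder;

fn main() {
    // Threads come out as "MyThreadPool (0)", "MyThreadPool (1)", ...
    // following the `<thread_name> (<thread_index>)` scheme above.
    let pool = TaskPoolBuilder::new()
        .num_threads(4)
        .thread_name("MyThreadPool".to_string())
        .build();
    assert_eq!(pool.thread_num(), 4);
}
```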
```diff
@@ -156,7 +156,7 @@ impl TaskPool {
         self.inner.threads.len()
     }
 
-    /// Allows spawning non-`static futures on the thread pool. The function takes a callback,
+    /// Allows spawning non-`'static` futures on the thread pool. The function takes a callback,
     /// passing a scope object into it. The scope object provided to the callback can be used
     /// to spawn tasks. This function will await the completion of all tasks before returning.
     ///
```
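
A sketch of the scoped API this doc comment describes: the closure may borrow locals (the non-`'static` case) precisely because `scope` blocks until every spawned task has completed, assuming the 0.6-era signature that returns the tasks' outputs as a `Vec`:

```rust
use bevy_tasks::TaskPool;

fn main() {
    let pool = TaskPool::new();
    let values = [1, 2, 3, 4];

    // `values` is borrowed (non-`'static`) inside the tasks; this is sound
    // because `scope` awaits all of them before returning.
    let doubled: Vec<i32> = pool.scope(|s| {
        for &v in &values {
            s.spawn(async move { v * 2 });
        }
    });

    assert_eq!(doubled.iter().sum::<i32>(), 20);
}
```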

crates/bevy_tasks/src/usages.rs

```diff
@@ -1,9 +1,9 @@
 //! Definitions for a few common task pools that we want. Generally the determining factor for what
 //! kind of work should go in each pool is latency requirements.
 //!
-//! For CPU-intensive work (tasks that generally spin until completion) we have a standard Compute
-//! pool and an AsyncCompute pool. Work that does not need to be completed to present the next
-//! frame should go to the AsyncCompute pool
+//! For CPU-intensive work (tasks that generally spin until completion) we have a standard
+//! [`ComputeTaskPool`] and an [`AsyncComputeTaskPool`]. Work that does not need to be completed to
+//! present the next frame should go to the [`AsyncComputeTaskPool`]
 //!
 //! For IO-intensive work (tasks that spend very little time in a "woken" state) we have an IO
 //! task pool. The tasks here are expected to complete very quickly. Generally they should just
```
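
A sketch of how these pools were consumed in the 0.6 era, when they were ECS resources wrapping a `TaskPool`; the system and its workload here are hypothetical:

```rust
use bevy_ecs::prelude::Res;
use bevy_tasks::AsyncComputeTaskPool;

// Work that does not have to finish before the next frame goes on the
// async compute pool, per the module docs above.
fn spawn_background_work(pool: Res<AsyncComputeTaskPool>) {
    let task = pool.spawn(async {
        // ... some long-running computation ...
        42
    });
    // Detach so the task runs to completion without being awaited here.
    task.detach();
}
```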