Skip to content

Commit

Permalink
Fix clippy warnings
Browse files Browse the repository at this point in the history
  • Loading branch information
coolreader18 committed Nov 19, 2024
1 parent 20258c8 commit 348c792
Show file tree
Hide file tree
Showing 10 changed files with 38 additions and 37 deletions.
2 changes: 2 additions & 0 deletions crates/bindings-sys/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ pub mod raw {
/// - `prefix = prefix_ptr[..prefix_len]`,
/// - `rstart = rstart_ptr[..rstart_len]`,
/// - `rend = rend_ptr[..rend_len]`,
///
/// in WASM memory.
///
/// The index itself has a schema/type.
Expand Down Expand Up @@ -171,6 +172,7 @@ pub mod raw {
/// - `prefix = prefix_ptr[..prefix_len]`,
/// - `rstart = rstart_ptr[..rstart_len]`,
/// - `rend = rend_ptr[..rend_len]`,
///
/// in WASM memory.
///
/// This syscall will delete all the rows found by
Expand Down
24 changes: 12 additions & 12 deletions crates/core/src/db/datastore/traits.rs
Original file line number Diff line number Diff line change
Expand Up @@ -65,16 +65,16 @@ use spacetimedb_table::table::RowRef;
/// The ANSI SQL standard defined three anomalies in 1992:
///
/// - Dirty Reads: Occur when a transaction reads data written by a concurrent
/// uncommitted transaction.
/// uncommitted transaction.
///
/// - Non-repeatable Reads: Occur when a transaction reads the same row twice
/// and gets different data each time because another transaction has modified
/// the data in between the reads.
/// and gets different data each time because another transaction has modified
/// the data in between the reads.
///
/// - Phantom Reads: Occur when a transaction re-executes a query returning a
/// set of rows that satisfy a search condition and finds that the set of rows
/// satisfying the condition has changed due to another recently-committed
/// transaction.
/// set of rows that satisfy a search condition and finds that the set of rows
/// satisfying the condition has changed due to another recently-committed
/// transaction.
///
/// However since then database researchers have identified and cataloged many
/// more. See:
Expand All @@ -90,14 +90,14 @@ use spacetimedb_table::table::RowRef;
/// The following anomalies are not part of the SQL standard, but are important:
///
/// - Write Skew: Occurs when two transactions concurrently read the same data,
/// make decisions based on that data, and then write back modifications that
/// are mutually inconsistent with the decisions made by the other transaction,
/// despite no direct conflict on the same row being detected. e.g. I read what
/// you write and you read what I write.
/// make decisions based on that data, and then write back modifications that
/// are mutually inconsistent with the decisions made by the other transaction,
/// despite no direct conflict on the same row being detected. e.g. I read what
/// you write and you read what I write.
///
/// - Serialization Anomalies: Occur when the results of a set of transactions
/// are inconsistent with any serial execution of those transactions.

/// are inconsistent with any serial execution of those transactions.
///
/// PostgreSQL's documentation provides a good summary of the anomalies and
/// isolation levels that it supports:
///
Expand Down
2 changes: 2 additions & 0 deletions crates/core/src/host/wasmtime/wasm_instance_env.rs
Original file line number Diff line number Diff line change
Expand Up @@ -411,6 +411,7 @@ impl WasmInstanceEnv {
/// - `prefix = prefix_ptr[..prefix_len]`,
/// - `rstart = rstart_ptr[..rstart_len]`,
/// - `rend = rend_ptr[..rend_len]`,
///
/// in WASM memory.
///
/// The index itself has a schema/type.
Expand Down Expand Up @@ -693,6 +694,7 @@ impl WasmInstanceEnv {
/// - `prefix = prefix_ptr[..prefix_len]`,
/// - `rstart = rstart_ptr[..rstart_len]`,
/// - `rend = rend_ptr[..rend_len]`,
///
/// in WASM memory.
///
/// This syscall will delete all the rows found by
Expand Down
6 changes: 3 additions & 3 deletions crates/lib/src/identity.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ impl Identity {
pub const fn from_byte_array(bytes: [u8; 32]) -> Self {
// SAFETY: The transmute is an implementation of `u256::from_le_bytes`,
// but works in a const context.
Self::from_u256(u256::from_le(unsafe { mem::transmute(bytes) }))
Self::from_u256(u256::from_le(unsafe { mem::transmute::<[u8; 32], u256>(bytes) }))
}

/// Create an `Identity` from a BIG-ENDIAN byte array.
Expand All @@ -83,9 +83,9 @@ impl Identity {
/// [0xb0, 0xb1, 0xb2, ...]
/// ```
pub const fn from_be_byte_array(bytes: [u8; 32]) -> Self {
// SAFETY: The transmute is an implementation of `u256::from_le_bytes`,
// SAFETY: The transmute is an implementation of `u256::from_be_bytes`,
// but works in a const context.
Self::from_u256(u256::from_be(unsafe { mem::transmute(bytes) }))
Self::from_u256(u256::from_be(unsafe { mem::transmute::<[u8; 32], u256>(bytes) }))
}

/// Converts `__identity__: u256` to `Identity`.
Expand Down
10 changes: 5 additions & 5 deletions crates/table/src/bflatn_from.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ use spacetimedb_sats::{
/// 1. the `fixed_offset` must point at a row in `page` lasting `ty.size()` bytes.
/// 2. the row must be a valid `ty`.
/// 3. for any `vlr: VarLenRef` stored in the row,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
pub unsafe fn serialize_row_from_page<S: Serializer>(
ser: S,
page: &Page,
Expand Down Expand Up @@ -61,7 +61,7 @@ fn update<R>(curr_offset: CurrOffset<'_>, with: impl FnOnce(&mut usize) -> R) ->
/// SAFETY:
/// 1. the `value` must be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr: VarLenRef` stored in `value`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
unsafe fn serialize_product<S: Serializer>(
ser: S,
bytes: &Bytes,
Expand Down Expand Up @@ -100,7 +100,7 @@ unsafe fn serialize_product<S: Serializer>(
/// SAFETY:
/// 1. the `value` must be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr: VarLenRef` stored in `value`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
unsafe fn serialize_sum<S: Serializer>(
ser: S,
bytes: &Bytes,
Expand Down Expand Up @@ -165,7 +165,7 @@ impl_serialize!(['a] Value<'a>, (self, ser) => {
/// SAFETY:
/// 1. the `value` must be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr: VarLenRef` stored in `value`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// 3. `align_to(curr_offset.get(), ty.align())` must be the offset of a field typed at `ty`.
pub(crate) unsafe fn serialize_value<S: Serializer>(
ser: S,
Expand Down Expand Up @@ -255,7 +255,7 @@ pub(crate) unsafe fn serialize_value<S: Serializer>(
/// SAFETY:
/// 1. the `value` must be valid at type `::String` and properly aligned for `::String`.
/// 2. for any `vlr: VarLenRef` stored in `value`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
unsafe fn serialize_string<S: Serializer>(
ser: S,
bytes: &Bytes,
Expand Down
5 changes: 1 addition & 4 deletions crates/table/src/btree_index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -92,10 +92,7 @@ impl Iterator for BTreeIndexRangeIter<'_> {
type Item = RowPointer;

fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|ptr| {
self.num_pointers_yielded += 1;
ptr
})
self.iter.next().inspect(|_| self.num_pointers_yielded += 1)
}
}

Expand Down
6 changes: 3 additions & 3 deletions crates/table/src/eq.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ use super::{
///
/// 1. `fixed_offset_a/b` are valid offsets for rows typed at `ty` in `page_a/b`.
/// 2. for any `vlr_a/b: VarLenRef` in the fixed parts of row `a` and `b`,
/// `vlr_a/b.first_offset` must either be `NULL` or point to a valid granule in `page_a/b`.
/// `vlr_a/b.first_offset` must either be `NULL` or point to a valid granule in `page_a/b`.
pub unsafe fn eq_row_in_page(
page_a: &Page,
page_b: &Page,
Expand Down Expand Up @@ -85,7 +85,7 @@ struct EqCtx<'page_a, 'page_b> {
/// SAFETY:
/// 1. `value_a/b` must be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr_a/b: VarLenRef` stored in `value_a/b`,
/// `vlr_a/b.first_offset` must either be `NULL` or point to a valid granule in `page_a/b`.
/// `vlr_a/b.first_offset` must either be `NULL` or point to a valid granule in `page_a/b`.
unsafe fn eq_product(ctx: &mut EqCtx<'_, '_>, ty: &ProductTypeLayout) -> bool {
let base_offset = ctx.curr_offset;
ty.elements.iter().all(|elem_ty| {
Expand All @@ -106,7 +106,7 @@ unsafe fn eq_product(ctx: &mut EqCtx<'_, '_>, ty: &ProductTypeLayout) -> bool {
/// SAFETY:
/// 1. `value_a/b` must both be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr_a/b: VarLenRef` stored in `value_a/b`,
/// `vlr_a/b.first_offset` must either be `NULL` or point to a valid granule in `page_a/b`.
/// `vlr_a/b.first_offset` must either be `NULL` or point to a valid granule in `page_a/b`.
unsafe fn eq_value(ctx: &mut EqCtx<'_, '_>, ty: &AlgebraicTypeLayout) -> bool {
debug_assert_eq!(
ctx.curr_offset,
Expand Down
6 changes: 3 additions & 3 deletions crates/table/src/eq_to_pv.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ use spacetimedb_sats::{AlgebraicValue, ProductValue};
///
/// 1. `fixed_offset` is a valid offset for row `lhs` typed at `ty` in `page`.
/// 2. for any `vlr: VarLenRef` in the fixed parts of row `lhs`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
pub unsafe fn eq_row_in_page_to_pv(
blob_store: &dyn BlobStore,
page: &Page,
Expand Down Expand Up @@ -74,7 +74,7 @@ struct EqCtx<'page> {
/// SAFETY:
/// 1. `lhs` must be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr: VarLenRef` stored in `lhs`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `ctx.lhs.page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `ctx.lhs.page`.
unsafe fn eq_product(ctx: &mut EqCtx<'_>, ty: &ProductTypeLayout, rhs: &ProductValue) -> bool {
let base_offset = ctx.curr_offset;
ty.elements.len() == rhs.elements.len()
Expand All @@ -96,7 +96,7 @@ unsafe fn eq_product(ctx: &mut EqCtx<'_>, ty: &ProductTypeLayout, rhs: &ProductV
/// SAFETY:
/// 1. `lhs` must both be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr: VarLenRef` stored in `lhs`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `ctx.lhs.page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `ctx.lhs.page`.
unsafe fn eq_value(ctx: &mut EqCtx<'_>, ty: &AlgebraicTypeLayout, rhs: &AlgebraicValue) -> bool {
debug_assert_eq!(
ctx.curr_offset,
Expand Down
8 changes: 4 additions & 4 deletions crates/table/src/page.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1416,7 +1416,7 @@ impl Page {
/// of all past, present, and future rows in this page.
///
/// - The `var_len_visitor` must visit the same set of `VarLenRef`s in the row
/// as the visitor provided to `insert_row`.
/// as the visitor provided to `insert_row`.
pub unsafe fn delete_row(
&mut self,
fixed_row: PageOffset,
Expand Down Expand Up @@ -1464,10 +1464,10 @@ impl Page {
/// # Safety
///
/// - `fixed_row_offset` must refer to a previously-allocated and initialized row in `self`,
/// and must not have been de-allocated. In other words, the fixed row must be *valid*.
/// and must not have been de-allocated. In other words, the fixed row must be *valid*.
///
/// - `fixed_row_size` and `var_len_visitor` must be consistent with each other
/// and with all other calls to any methods on `self`.
/// and with all other calls to any methods on `self`.
pub unsafe fn row_total_granules(
&self,
fixed_row_offset: PageOffset,
Expand Down Expand Up @@ -1622,7 +1622,7 @@ impl Page {
/// - `src_vlr.first_granule` must point to a valid granule or be NULL.
///
/// - To avoid leaving dangling uninitialized allocations in `dst_var`,
/// `dst_var` must already be checked to have enough size to store `src_vlr`
/// `dst_var` must already be checked to have enough size to store `src_vlr`
/// using `Self::has_space_for_row`.
unsafe fn copy_var_len_into(
&self,
Expand Down
6 changes: 3 additions & 3 deletions crates/table/src/row_hash.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ use spacetimedb_sats::{algebraic_value::ser::concat_byte_chunks_buf, bsatn::Dese
/// 1. the `fixed_offset` must point at a row in `page` lasting `ty.size()` bytes.
/// 2. the row must be a valid `ty`.
/// 3. for any `vlr: VarLenRef` stored in the row,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
pub unsafe fn hash_row_in_page(
hasher: &mut impl Hasher,
page: &Page,
Expand All @@ -49,7 +49,7 @@ pub unsafe fn hash_row_in_page(
/// SAFETY:
/// 1. the `value` must be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr: VarLenRef` stored in `value`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
unsafe fn hash_product(
hasher: &mut impl Hasher,
bytes: &Bytes,
Expand Down Expand Up @@ -78,7 +78,7 @@ unsafe fn hash_product(
/// SAFETY:
/// 1. the `value` must be valid at type `ty` and properly aligned for `ty`.
/// 2. for any `vlr: VarLenRef` stored in `value`,
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
/// `vlr.first_offset` must either be `NULL` or point to a valid granule in `page`.
unsafe fn hash_value(
hasher: &mut impl Hasher,
bytes: &Bytes,
Expand Down

0 comments on commit 348c792

Please sign in to comment.