Skip to content

Commit

Permalink
Shrink containers if their capacity isn't needed
Browse files Browse the repository at this point in the history
This eagerly releases memory after retracting large amounts of data, without
having to wait for another operation to happen.

Signed-off-by: Moritz Hoffmann <antiguru@gmail.com>
  • Loading branch information
antiguru committed Aug 18, 2023
1 parent 2b9ac68 commit 43b21f2
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 0 deletions.
8 changes: 8 additions & 0 deletions src/trace/implementations/ord.rs
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,7 @@ where
}
}
layer.vals.vals.vals.truncate(write_position);
layer.vals.vals.vals.maybe_shrink();
layer.vals.offs[layer.vals.keys.len()] = O::try_from(write_position).unwrap();

// 3. Remove values with empty histories. In addition, we need to update offsets
Expand All @@ -221,6 +222,7 @@ where
});
debug_assert_eq!(write_position, layer.vals.keys.len());
layer.vals.offs.truncate(write_position + 1);
layer.vals.offs.maybe_shrink();
layer.offs[layer.keys.len()] = O::try_from(write_position).unwrap();

// 4. Remove empty keys.
Expand All @@ -238,6 +240,9 @@ where
});
debug_assert_eq!(write_position, layer.keys.len());
layer.offs.truncate(layer.keys.len()+1);
layer.offs.maybe_shrink();
layer.vals.keys.maybe_shrink();
layer.keys.maybe_shrink();
}
}

Expand Down Expand Up @@ -568,6 +573,7 @@ where
}
}
layer.vals.vals.truncate(write_position);
layer.vals.vals.maybe_shrink();
layer.offs[layer.keys.len()] = O::try_from(write_position).unwrap();

// 4. Remove empty keys.
Expand All @@ -585,6 +591,8 @@ where
});
debug_assert_eq!(write_position, layer.keys.len());
layer.offs.truncate(layer.keys.len()+1);
layer.offs.maybe_shrink();
layer.keys.maybe_shrink();
}
}

Expand Down
10 changes: 10 additions & 0 deletions src/trace/layers/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,8 @@ pub trait BatchContainer: Default {
fn reserve(&mut self, additional: usize);
/// Creates a new container with sufficient capacity.
fn merge_capacity(cont1: &Self, cont2: &Self) -> Self;
/// Shrink the container if needed
fn maybe_shrink(&mut self);
}

impl<T: Clone> BatchContainer for Vec<T> {
Expand All @@ -147,6 +149,11 @@ impl<T: Clone> BatchContainer for Vec<T> {
/// Create an empty container sized to hold the contents of both inputs.
fn merge_capacity(cont1: &Self, cont2: &Self) -> Self {
    // Sum the element counts up front so a single allocation suffices
    // for the subsequent merge.
    let total = cont1.len() + cont2.len();
    Vec::with_capacity(total)
}
/// Release excess heap memory when the vector is badly under-utilized.
fn maybe_shrink(&mut self) {
    // Only shrink when fewer than a quarter of the allocated slots are
    // in use; this hysteresis avoids shrinking after every small removal.
    let quarter_capacity = self.capacity() / 4;
    if self.len() < quarter_capacity {
        self.shrink_to_fit();
    }
}
}

impl<T: Columnation> BatchContainer for TimelyStack<T> {
Expand All @@ -173,6 +180,9 @@ impl<T: Columnation> BatchContainer for TimelyStack<T> {
new.reserve_regions(std::iter::once(cont1).chain(std::iter::once(cont2)));
new
}
fn maybe_shrink(&mut self) {
    // Intentionally a no-op: `TimelyStack` does not currently provide a
    // way to release spare capacity, so there is nothing to shrink here.
    // NOTE(review): revisit if a shrink operation becomes available.
}
}


Expand Down

0 comments on commit 43b21f2

Please sign in to comment.