
better tombstone value
ckampfe committed May 23, 2024
1 parent a02585a commit 6d32c0a
Showing 3 changed files with 15 additions and 13 deletions.
README.md: 2 changes (2 additions, 0 deletions)
@@ -41,3 +41,5 @@ I have known about Bitcask for a while, and I wanted to learn it by building a w
 - [ ] clean up merging code
 - [ ] clean up datamodel around records/entrypointers/mergepointers
 - [ ] more research into how async drop interacts with disk writes/buffer flushes
+- [x] investigate a better, less ambiguous tombstone value
+- [ ] move record write_insert and write_delete into Record
src/base.rs: 9 changes (3 additions, 6 deletions)
@@ -1,7 +1,7 @@
 use crate::keydir::{EntryPointer, EntryWithLiveness, Keydir, Liveness};
 use crate::loadable::Loadable;
 use crate::merge_pointer::MergePointer;
-use crate::record::Tombstone;
+use crate::record::Record;
 use crate::Options;
 use crate::{error, FlushBehavior};
 use serde::de::DeserializeOwned;
@@ -395,10 +395,7 @@ where
             source: e,
         })?;

-        let encoded_value = bincode::serialize(&Tombstone).map_err(|e| error::SerializeError {
-            msg: "unable to serialize to bincode".to_string(),
-            source: e,
-        })?;
+        let encoded_value = Record::tombstone();

        let key_size = encoded_key.len();

@@ -412,7 +409,7 @@ where
         payload.extend_from_slice(&encoded_key_size);
         payload.extend_from_slice(&encoded_value_size);
         payload.extend_from_slice(&encoded_key);
-        payload.extend_from_slice(&encoded_value);
+        payload.extend_from_slice(encoded_value);

         let hash = blake3::hash(&payload);
         let hash = hash.as_bytes();
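In the hunks above, `encoded_value` changes from a freshly serialized `Vec<u8>` to the `&'static [u8]` returned by `Record::tombstone()`, which is why the extra `&` disappears from the `extend_from_slice` call. A minimal standalone sketch of that shape (simplified stand-ins, not the crate's actual module layout):

    // Minimal sketch (simplified stand-ins, not the crate's code layout):
    // the tombstone is serialized once, memoized, and handed out as a
    // &'static [u8], so callers append it without taking another reference.
    use std::sync::OnceLock;

    const TOMBSTONE_BYTES: &[u8] = b"bitcask_tombstone";
    static TOMBSTONE: OnceLock<Vec<u8>> = OnceLock::new();

    fn tombstone() -> &'static [u8] {
        TOMBSTONE.get_or_init(|| bincode::serialize(&TOMBSTONE_BYTES).unwrap())
    }

    fn main() {
        let encoded_value = tombstone(); // &'static [u8], not a Vec<u8>
        let mut payload: Vec<u8> = Vec::new();
        payload.extend_from_slice(encoded_value); // already a slice, no extra `&`
        // Every call returns the same memoized bytes.
        assert_eq!(encoded_value.as_ptr(), tombstone().as_ptr());
        assert_eq!(payload.len(), encoded_value.len());
    }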
src/record.rs: 17 changes (10 additions, 7 deletions)
@@ -1,15 +1,12 @@
 use std::sync::OnceLock;

 use crate::keydir::Liveness;
-use serde::{de::DeserializeOwned, Deserialize, Serialize};
+use serde::de::DeserializeOwned;
 use tokio::io::{AsyncRead, AsyncReadExt};

-// TODO thse should probably something else.
-// I think they serialize to `[]`, which is probably not good
-#[derive(Serialize, Deserialize)]
-pub(crate) struct Tombstone;
+const TOMBSTONE_BYTES: &[u8] = b"bitcask_tombstone";

-pub(crate) static TOMBSTONE: OnceLock<Vec<u8>> = OnceLock::new();
+static TOMBSTONE: OnceLock<Vec<u8>> = OnceLock::new();

 /// A record is a "header" and a "body"
 /// The header is (in on-disk and in-memory order):
@@ -64,13 +61,19 @@ impl Record {
     }

     pub(crate) fn liveness(&self) -> Liveness {
-        if self.value_bytes() == TOMBSTONE.get_or_init(|| bincode::serialize(&Tombstone).unwrap()) {
+        if self.value_bytes()
+            == TOMBSTONE.get_or_init(|| bincode::serialize(&TOMBSTONE_BYTES).unwrap())
+        {
             Liveness::Deleted
         } else {
             Liveness::Live
         }
     }

+    pub(crate) fn tombstone() -> &'static [u8] {
+        TOMBSTONE.get_or_init(|| bincode::serialize(&TOMBSTONE_BYTES).unwrap())
+    }
+
     pub(crate) fn key_bytes(&self) -> &[u8] {
         let start = 0;
         let end = self.key_size() as usize;
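The TODO removed above is the motivation for this change: under bincode 1.x (assumed here, since the commit does not show the dependency version), the old unit struct `Tombstone` serializes to zero bytes, so any empty stored value would compare equal to the tombstone in `liveness()`. The new `TOMBSTONE_BYTES` constant serializes to a length-prefixed byte string instead, matching the README item about a less ambiguous tombstone value. A standalone illustration, not part of the commit:

    // Hypothetical illustration (assumes bincode 1.x defaults and serde's derive feature).
    use serde::Serialize;

    #[derive(Serialize)]
    struct Tombstone; // old approach: a unit struct

    const TOMBSTONE_BYTES: &[u8] = b"bitcask_tombstone"; // new approach

    fn main() {
        let old = bincode::serialize(&Tombstone).unwrap();
        // A unit struct carries no data, so it serializes to an empty buffer,
        // which is indistinguishable from a genuinely empty value.
        println!("old tombstone: {:?} ({} bytes)", old, old.len());

        let new = bincode::serialize(&TOMBSTONE_BYTES).unwrap();
        // A length prefix plus the literal bytes: it can no longer collide with
        // an empty value, only with a value equal to these exact bytes.
        println!("new tombstone: {:?} ({} bytes)", new, new.len());
    }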
