From 9a3a673b559d1d564118720fe9c4b1074a21de90 Mon Sep 17 00:00:00 2001
From: Vincent Herlemont
Date: Mon, 4 Sep 2023 19:35:43 +0200
Subject: [PATCH] feat: implement mismatched model id + improvement

---
 Cargo.toml                                    |  4 +
 README.md                                     | 77 ++++++++++---------
 benches/overhead.rs                           | 10 +--
 benches/prepend_bytes.rs                      | 28 +++++++
 native_model_macro/src/lib.rs                 |  2 +-
 native_model_macro/src/method/decode_body.rs  | 15 +++-
 .../src/method/decode_upgrade_body.rs         | 16 ++--
 src/header.rs                                 |  2 +-
 src/lib.rs                                    | 13 +++-
 src/model.rs                                  | 16 ++--
 src/wrapper.rs                                | 38 +++------
 tests/_experiment.rs                          | 58 +++++++-------
 tests/macro_decode_decode_upgrade.rs          | 40 +++++++---
 13 files changed, 188 insertions(+), 131 deletions(-)
 create mode 100644 benches/prepend_bytes.rs

diff --git a/Cargo.toml b/Cargo.toml
index 3a92880..44faa2a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -35,5 +35,9 @@ harness = false
 name = "overhead_on_bincode"
 harness = false
 
+[[bench]]
+name = "prepend_bytes"
+harness = false
+
 [build-dependencies]
 skeptic = "0.13"
\ No newline at end of file
diff --git a/README.md b/README.md
index 87b1538..e26268f 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ See [concepts](#concepts) for more details.
   versions of the data model.
 - **Data Consistency**: Ensure that we process the data expected model.
 - **Flexibility**: You can use any serialization format you want. More details [here](#setup-your-serialization-format).
-- **Performance**: A minimal overhead. More details [here](#performance).
+- **Performance**: A minimal overhead (encode: ~20 ns, decode: ~40 ps). More details [here](#performance).
 
 ## Usage
 
@@ -71,10 +71,31 @@ When not to use it?
 - You need to have a human-readable format. (You can use a human-readable format like JSON wrapped in a native model,
   but you have to unwrap it to see the data correctly.)
 
-# Status
+## Status
 
 Early development. Not ready for production.
 
+## Concepts
+
+To understand how the native model works, you need to understand the following concepts.
+
+- **Identity** (`id`): The identity is the unique identifier of the model. It is used to identify the model and
+  prevent decoding a model into the wrong Rust type.
+- **Version** (`version`): The version of the model. It is used to check the compatibility between two
+  models.
+- **Encode**: Encoding is the process of converting a model into a byte array.
+- **Decode**: Decoding is the process of converting a byte array into a model.
+- **Downgrade**: Downgrading is the process of converting a model into a previous version of the model.
+- **Upgrade**: Upgrading is the process of converting a model into a newer version of the model.
+
+Under the hood, the native model is a thin wrapper around serialized data. The `id` and the `version` are each encoded as a [`little_endian::U32`](https://docs.rs/zerocopy/latest/zerocopy/byteorder/little_endian/type.U32.html), which adds 8 bytes at the beginning of the data.
+
+```
++------------------+------------------+------------------------------------+
+| ID (4 bytes)     | Version (4 bytes)| Data (indeterminate-length bytes)  |
++------------------+------------------+------------------------------------+
+```
+
 ## Setup your serialization format
 
 First, you need to set up your serialization format. You can use any serialization format.
@@ -146,46 +167,26 @@ struct Cord {
 
 Full example [here](tests/example/example_define_model.rs).
 
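+As a quick sanity check of the header layout described in [Concepts](#concepts), here is a minimal sketch.
+It is an illustration only: `Foo` is a hypothetical model, the `use native_model::native_model;` import is
+an assumption, and the bincode setup from
+[Setup your serialization format](#setup-your-serialization-format) is expected to be in place.
+
+```rust
+use bincode::{Decode, Encode};
+use native_model::native_model;
+
+#[derive(Debug, Encode, Decode)]
+#[native_model(id = 1, version = 1)]
+struct Foo {
+    x: i32,
+}
+
+fn main() {
+    let bytes = native_model::encode(&Foo { x: 42 }).unwrap();
+    // The first 4 bytes are the model id and the next 4 bytes the version, both little-endian,
+    // followed by the body produced by the serialization format.
+    assert_eq!(bytes[0..4], 1u32.to_le_bytes());
+    assert_eq!(bytes[4..8], 1u32.to_le_bytes());
+}
+```
+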
-# Concepts
+## Performance
 
-In order to understand how the native model works, you need to understand the following concepts.
+Native model is designed to have a minimal and constant overhead: it is the same
+whatever the size of the data. Under the hood, we use the [zerocopy](https://docs.rs/zerocopy/latest/zerocopy/) crate
+to avoid unnecessary copies.
 
-- **Identity**(`id`): The identity is the unique identifier of the model. It is used to identify the model and
-  prevent to decode a model into the wrong type.
-- **Version**(`version`) The version is the version of the model. It is used to check the compatibility between two
-  models.
-- **Encode**: The encode is the process of converting a model into a byte array.
-- **Decode**: The decode is the process of converting a byte array into a model.
-- **Downgrade**: The downgrade is the process of converting a model into a previous version of the model.
-- **Upgrade**: The upgrade is the process of converting a model into a newer version of the model.
+👉 To get the total encode/decode time, add the time of your serialization format to this overhead.
 
-Under the hood, the native model is a thin wrapper around serialized data. The `id` and the `version` are twice encoded with a [`little_endian::U32`](https://docs.rs/zerocopy/latest/zerocopy/byteorder/little_endian/type.U32.html). That represents 8 bytes, that are added at the beginning of the data.
+Summary:
+- **Encode**: ~20 ns
+- **Decode**: ~40 ps
 
-```
-+------------------+------------------+------------------------------------+
-| ID (4 bytes)     | Version (4 bytes)| Data (indeterminate-length bytes)  |
-+------------------+------------------+------------------------------------+
-```
-
-# Performance
-
-This crate is in an early stage of development, so the performance should be improved in the future.
-The goal is to have a minimal and constant overhead for all data sizes. It uses the [zerocopy](https://docs.rs/zerocopy/latest/zerocopy/) crate to avoid unnecessary copies.
-
-Current performance:
-- Encode time: have overhead that evolves linearly with the data size.
-- Decode time: have overhead of ~162 ps for all data sizes.
-
-
-| data size              | encode time (ns/ps/µs/ms)  | decode time (ps)      |
-|:---------------------:|:--------------------------:|:----------------:|
-| 1 B                    | 40.093 ns - 40.510 ns      | 161.87 ps - 162.02 ps |
-| 1 KiB (1024 B)         | 116.45 ns - 116.83 ns      | 161.85 ps - 162.08 ps |
-| 1 MiB (1048576 B)      | 66.697 µs - 67.634 µs      | 161.87 ps - 162.18 ps |
-| 10 MiB (10485760 B)    | 1.5670 ms - 1.5843 ms      | 162.40 ps - 163.52 ps |
-| 100 MiB (104857600 B)  | 63.778 ms - 64.132 ms      | 162.71 ps - 165.10 ps |
+| data size            | encode time (ns)      | decode time (ps)        |
+|:--------------------:|:---------------------:|:-----------------------:|
+| 1 B                  | 19.769 ns - 20.154 ns | 40.526 ps - 40.617 ps   |
+| 1 KiB                | 19.597 ns - 19.971 ns | 40.534 ps - 40.633 ps   |
+| 1 MiB                | 19.662 ns - 19.910 ns | 40.508 ps - 40.632 ps   |
+| 10 MiB               | 19.591 ns - 19.980 ns | 40.504 ps - 40.605 ps   |
+| 100 MiB              | 19.669 ns - 19.867 ns | 40.520 ps - 40.644 ps   |
 
 Benchmark of the native model overhead [here](benches/overhead.rs).
-To know how much time it takes to encode/decode your data, you need to add this overhead to the time of your serialization format.
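+
+The overhead is also constant in space: encoding always prepends exactly 8 bytes of header. Below is a
+minimal sketch of that property, shown for illustration with the low-level
+`native_model::wrapper::native_model_encode` function that the benchmark above exercises (in application
+code you would normally call the higher-level `native_model::encode` instead):
+
+```rust
+fn main() {
+    // Pretend this is the output of your serialization format: a 1 MiB body.
+    let mut body: Vec<u8> = vec![1; 1024 * 1024];
+    let wrapped = native_model::wrapper::native_model_encode(&mut body, 1, 1);
+    // Whatever the payload size, the header (id + version) adds exactly 8 bytes.
+    assert_eq!(wrapped.len(), 1024 * 1024 + 8);
+}
+```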
- diff --git a/benches/overhead.rs b/benches/overhead.rs index 2946afe..a21750f 100644 --- a/benches/overhead.rs +++ b/benches/overhead.rs @@ -14,7 +14,7 @@ fn native_model_decode_body(data: Vec) -> Result); -fn wrapper(data: &mut Vec) { +fn wrap(data: &mut Vec) { native_model::wrapper::native_model_encode(data, 1, 1); } @@ -31,16 +31,16 @@ fn criterion_benchmark(c: &mut Criterion) { // encode let data = Data(vec![1; nb_bytes]); - let encode_body = native_model_encode_body(&data).unwrap(); + let mut encode_body = native_model_encode_body(&data).unwrap(); group.bench_function(BenchmarkId::new("encode", nb_bytes), |b| { - b.iter(|| wrapper(&mut encode_body.clone())) + b.iter(|| wrap(&mut encode_body)) }); // decode let data = Data(vec![1; nb_bytes]); - let encode_body = native_model::encode(&data).unwrap(); + let mut encode_body = native_model::encode(&data).unwrap(); group.bench_function(BenchmarkId::new("decode", nb_bytes), |b| { - b.iter(|| unwrap(&mut encode_body.clone())) + b.iter(|| unwrap(&mut encode_body)) }); } } diff --git a/benches/prepend_bytes.rs b/benches/prepend_bytes.rs new file mode 100644 index 0000000..7f77e37 --- /dev/null +++ b/benches/prepend_bytes.rs @@ -0,0 +1,28 @@ +/// Found a way to prepend bytes at the beginning of a Vec with a constant overhead. +use bincode::{Decode, Encode}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; + +fn criterion_benchmark(c: &mut Criterion) { + let mut group = c.benchmark_group("encode"); + + // 1 byte, 1KB, 1MB, 10MB, 100MB + for nb_bytes in [1, 1024, 1024 * 1024, 10 * 1024 * 1024, 100 * 1024 * 1024].into_iter() { + group.throughput(criterion::Throughput::Bytes(nb_bytes as u64)); + + let header: Vec = vec![0; 4]; + let mut data: Vec = vec![1; nb_bytes]; + group.bench_function(BenchmarkId::new("prepend_bytes", nb_bytes), |b| { + b.iter(|| { + // Fastest way to prepend bytes to data + let mut header = header.clone(); + header.append(&mut data); + // prepend bytes to data + // let mut header = header.clone(); + // header.extend_from_slice(&data); + }); + }); + } +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/native_model_macro/src/lib.rs b/native_model_macro/src/lib.rs index 80fce1d..a048b68 100644 --- a/native_model_macro/src/lib.rs +++ b/native_model_macro/src/lib.rs @@ -94,7 +94,7 @@ pub fn native_model(args: TokenStream, input: TokenStream) -> TokenStream { let native_model_version_fn = generate_native_model_version(&attrs); let native_model_encode_body_fn = generate_native_model_encode_body(); let native_model_encode_downgrade_body_fn = generate_native_model_encode_downgrade_body(&attrs); - let native_model_decode_body_fn = generate_native_model_decode_body(); + let native_model_decode_body_fn = generate_native_model_decode_body(&attrs); let native_model_decode_upgrade_body_fn = generate_native_model_decode_upgrade_body(&attrs); let gen = quote! { diff --git a/native_model_macro/src/method/decode_body.rs b/native_model_macro/src/method/decode_body.rs index 9913787..97ad509 100644 --- a/native_model_macro/src/method/decode_body.rs +++ b/native_model_macro/src/method/decode_body.rs @@ -1,10 +1,17 @@ +use crate::ModelAttributes; use proc_macro2::TokenStream; use quote::quote; -pub(crate) fn generate_native_model_decode_body() -> TokenStream { +pub(crate) fn generate_native_model_decode_body(attrs: &ModelAttributes) -> TokenStream { + let id = attrs.id.clone().expect("id is required"); let gen = quote! 
{ - fn native_model_decode_body(data: Vec) -> Result { - native_model_decode_body(data).map_err(|e| native_model::DecodeBodyError { + fn native_model_decode_body(data: Vec, id: u32) -> Result { + println!("id: {}, {}", id, #id); + if id != #id { + return Err(native_model::DecodeBodyError::MismatchedModelId); + } + + native_model_decode_body(data).map_err(|e| native_model::DecodeBodyError::DecodeError { msg: format!("{}", e), source: e.into(), }) @@ -12,4 +19,4 @@ pub(crate) fn generate_native_model_decode_body() -> TokenStream { }; gen.into() -} \ No newline at end of file +} diff --git a/native_model_macro/src/method/decode_upgrade_body.rs b/native_model_macro/src/method/decode_upgrade_body.rs index bc4ad5c..9e0d449 100644 --- a/native_model_macro/src/method/decode_upgrade_body.rs +++ b/native_model_macro/src/method/decode_upgrade_body.rs @@ -8,11 +8,11 @@ pub(crate) fn generate_native_model_decode_upgrade_body(attrs: &ModelAttributes) let model_from_or_try_from = if let Some(from) = native_model_from { quote! { - #from::native_model_decode_upgrade_body(data, x).map(|a| a.into()) + #from::native_model_decode_upgrade_body(data, id, version).map(|a| a.into()) } } else if let Some((try_from, error_try_from)) = native_model_try_from { quote! { - let result = #try_from::native_model_decode_upgrade_body(data, x).map(|b| { + let result = #try_from::native_model_decode_upgrade_body(data, id, version).map(|b| { b.try_into() .map_err(|e: #error_try_from| native_model::UpgradeError { msg: format!("{}", e), @@ -24,22 +24,22 @@ pub(crate) fn generate_native_model_decode_upgrade_body(attrs: &ModelAttributes) } else { quote! { Err(native_model::Error::UpgradeNotSupported { - from: x, + from: version, to: Self::native_model_version(), }) } }; let gen = quote! { - fn native_model_decode_upgrade_body(data: Vec, x: u32) -> native_model::Result { - if x == Self::native_model_version() { - let result = Self::native_model_decode_body(data)?; + fn native_model_decode_upgrade_body(data: Vec, id: u32, version: u32) -> native_model::Result { + if version == Self::native_model_version() { + let result = Self::native_model_decode_body(data, id)?; Ok(result) - } else if x < Self::native_model_version() { + } else if version < Self::native_model_version() { #model_from_or_try_from } else { Err(native_model::Error::UpgradeNotSupported { - from: x, + from: version, to: Self::native_model_version(), }) } diff --git a/src/header.rs b/src/header.rs index 5d09cce..db836fe 100644 --- a/src/header.rs +++ b/src/header.rs @@ -4,6 +4,6 @@ use zerocopy::{AsBytes, FromBytes, FromZeroes}; #[derive(FromZeroes, FromBytes, AsBytes, Debug)] #[repr(C)] pub struct Header { - pub(crate) type_id: U32, + pub(crate) id: U32, pub(crate) version: U32, } diff --git a/src/lib.rs b/src/lib.rs index 3b5dbe7..7fbbf7c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -56,10 +56,15 @@ pub type DecodeResult = std::result::Result; #[derive(Error, Debug)] #[error("Decode body error: {msg}")] -pub struct DecodeBodyError { - pub msg: String, - #[source] - pub source: anyhow::Error, +pub enum DecodeBodyError { + #[error("Mismatched model id")] + MismatchedModelId, + #[error("Decode error: {msg}")] + DecodeError { + msg: String, + #[source] + source: anyhow::Error, + }, } pub type EncodeResult = std::result::Result; diff --git a/src/model.rs b/src/model.rs index f1160af..08a9351 100644 --- a/src/model.rs +++ b/src/model.rs @@ -6,11 +6,11 @@ pub trait Model: Sized { // --------------- Decode --------------- - fn native_model_decode_body(data: Vec) -> DecodeResult 
+ fn native_model_decode_body(data: Vec, id: u32) -> DecodeResult where Self: Sized; - fn native_model_decode_upgrade_body(data: Vec, version: u32) -> Result + fn native_model_decode_upgrade_body(data: Vec, id: u32, version: u32) -> Result where Self: Sized; @@ -19,9 +19,13 @@ pub trait Model: Sized { Self: Sized, { let native_model = crate::Wrapper::deserialize(&data[..]).unwrap(); + let source_id = native_model.get_id(); let source_version = native_model.get_version(); - let result = - Self::native_model_decode_upgrade_body(native_model.value().to_vec(), source_version)?; + let result = Self::native_model_decode_upgrade_body( + native_model.value().to_vec(), + source_id, + source_version, + )?; Ok((result, source_version)) } @@ -40,7 +44,7 @@ pub trait Model: Sized { Self: Sized, { let mut data = self.native_model_encode_body()?; - crate::native_model_encode( + let data = crate::native_model_encode( &mut data, Self::native_model_id(), Self::native_model_version(), @@ -54,7 +58,7 @@ pub trait Model: Sized { { let version = version.clone(); let mut data = self.native_model_encode_downgrade_body(version)?; - crate::native_model_encode(&mut data, Self::native_model_id(), version); + let data = crate::native_model_encode(&mut data, Self::native_model_id(), version); Ok(data) } } diff --git a/src/wrapper.rs b/src/wrapper.rs index cc45652..5b13443 100644 --- a/src/wrapper.rs +++ b/src/wrapper.rs @@ -23,7 +23,11 @@ impl Wrapper { } pub fn get_type_id(&self) -> u32 { - self.header.type_id.get() + self.header.id.get() + } + + pub fn get_id(&self) -> u32 { + self.header.id.get() } pub fn get_version(&self) -> u32 { @@ -33,7 +37,7 @@ impl Wrapper { impl Wrapper { pub fn set_type_id(&mut self, type_id: u32) { - self.header.type_id = U32::new(type_id); + self.header.id = U32::new(type_id); } pub fn set_version(&mut self, version: u32) { @@ -41,32 +45,14 @@ impl Wrapper { } } -pub fn native_model_encode(value: &mut Vec, type_id: u32, version: u32) { +pub fn native_model_encode(data: &mut Vec, type_id: u32, version: u32) -> Vec { let header = Header { - type_id: U32::new(type_id), + id: U32::new(type_id), version: U32::new(version), }; - let header = header.as_bytes(); - value.reserve(header.len()); - value.splice(..0, header.iter().cloned()); - - // Try to do with unsafe code to improve performance but benchmark shows that it's the same - // - // // Add header to the beginning of the vector - // unsafe { - // // get the raw pointer to the vector's buffer - // let ptr = value.as_mut_ptr(); - // - // // move the existing elements to the right - // ptr.offset(header.len() as isize) - // .copy_from_nonoverlapping(ptr, value.len()); - // - // // copy the elements from the header to the beginning of the vector - // ptr.copy_from_nonoverlapping(header.as_ptr(), header.len()); - // - // // update the length of the vector - // value.set_len(value.len() + header.len()); - // } + let mut header = header.as_bytes().to_vec(); + header.append(data); + header } #[cfg(test)] @@ -76,7 +62,7 @@ mod tests { #[test] fn native_model_deserialize_with_body() { let mut data = vec![0u8; 8]; - native_model_encode(&mut data, 200000, 100000); + let data = native_model_encode(&mut data, 200000, 100000); assert_eq!(data.len(), 16); let model = Wrapper::deserialize(&data[..]).unwrap(); assert_eq!(model.get_type_id(), 200000); diff --git a/tests/_experiment.rs b/tests/_experiment.rs index 42ddfff..9e7b71c 100644 --- a/tests/_experiment.rs +++ b/tests/_experiment.rs @@ -1,7 +1,6 @@ use bincode::{config, Decode, Encode}; use 
native_model::Result; use native_model::{DecodeBodyError, DecodeResult, EncodeBodyError, EncodeResult, Model}; - // Add this function to the macro for custom serialization fn native_model_encode(obj: &T) -> anyhow::Result> { let result = bincode::encode_to_vec(obj, config::standard())?; @@ -29,16 +28,16 @@ impl Model for A { 1 } - fn native_model_decode_upgrade_body(_data: Vec, x: u32) -> Result { + fn native_model_decode_upgrade_body(_data: Vec, _id: u32, version: u32) -> Result { println!( "A::deserialization_and_upgrade({}, {})", - x, + version, Self::native_model_version() ); - if x == Self::native_model_version() { + if version == Self::native_model_version() { Ok(Self {}) - } else if x < Self::native_model_version() { - panic!("The version {} not supported", x); + } else if version < Self::native_model_version() { + panic!("The version {} not supported", version); } else { panic!("Not implemented"); } @@ -54,11 +53,11 @@ impl Model for A { }) } - fn native_model_decode_body(data: Vec) -> DecodeResult + fn native_model_decode_body(data: Vec, _id: u32) -> DecodeResult where Self: Sized, { - native_model_decode(data).map_err(|e| DecodeBodyError { + native_model_decode(data).map_err(|e| DecodeBodyError::DecodeError { msg: format!("{}", e), source: e.into(), }) @@ -95,16 +94,16 @@ impl Model for B { 2 } - fn native_model_decode_upgrade_body(_data: Vec, x: u32) -> Result { + fn native_model_decode_upgrade_body(_data: Vec, id: u32, version: u32) -> Result { println!( "B::deserialization_and_upgrade({}, {})", - x, + version, Self::native_model_version() ); - if x == Self::native_model_version() { + if version == Self::native_model_version() { Ok(Self {}) - } else if x < Self::native_model_version() { - A::native_model_decode_upgrade_body(_data, x).map(|a| a.into()) + } else if version < Self::native_model_version() { + A::native_model_decode_upgrade_body(_data, id, version).map(|a| a.into()) } else { panic!("Not implemented"); } @@ -120,11 +119,11 @@ impl Model for B { }) } - fn native_model_decode_body(data: Vec) -> DecodeResult + fn native_model_decode_body(data: Vec, _id: u32) -> DecodeResult where Self: Sized, { - native_model_decode(data).map_err(|e| DecodeBodyError { + native_model_decode(data).map_err(|e| DecodeBodyError::DecodeError { msg: format!("{}", e), source: e.into(), }) @@ -173,16 +172,16 @@ impl Model for C { 3 } - fn native_model_decode_upgrade_body(_data: Vec, x: u32) -> Result { + fn native_model_decode_upgrade_body(_data: Vec, id: u32, version: u32) -> Result { println!( "C::deserialization_and_upgrade({}, {})", - x, + version, Self::native_model_version() ); - if x == Self::native_model_version() { + if version == Self::native_model_version() { Ok(Self {}) - } else if x < Self::native_model_version() { - let result = B::native_model_decode_upgrade_body(_data, x).map(|b| { + } else if version < Self::native_model_version() { + let result = B::native_model_decode_upgrade_body(_data, id, version).map(|b| { b.try_into() .map_err(|e: anyhow::Error| native_model::UpgradeError { msg: format!("{}", e), @@ -205,11 +204,11 @@ impl Model for C { }) } - fn native_model_decode_body(data: Vec) -> DecodeResult + fn native_model_decode_body(data: Vec, _id: u32) -> DecodeResult where Self: Sized, { - native_model_decode(data).map_err(|e| DecodeBodyError { + native_model_decode(data).map_err(|e| DecodeBodyError::DecodeError { msg: format!("{}", e), source: e.into(), }) @@ -289,16 +288,17 @@ fn test_encode_downgrade() { #[test] fn test_decode_upgrade() { - let x = 3; - let result = 
C::native_model_decode_upgrade_body(vec![], x); + let id = 1; + let version = 3; + let result = C::native_model_decode_upgrade_body(vec![], id, version); dbg!(&result); - let x = 2; - let result = C::native_model_decode_upgrade_body(vec![], x); + let version = 2; + let result = C::native_model_decode_upgrade_body(vec![], id, version); dbg!(&result); - let x = 1; - let result = C::native_model_decode_upgrade_body(vec![], x); + let version = 1; + let result = C::native_model_decode_upgrade_body(vec![], id, version); dbg!(&result); } @@ -311,7 +311,7 @@ where T: Model, { if model_id == T::native_model_id() { - T::native_model_decode_upgrade_body(_data, version) + T::native_model_decode_upgrade_body(_data, model_id, version) } else { panic!("The model id {} not supported", model_id); } diff --git a/tests/macro_decode_decode_upgrade.rs b/tests/macro_decode_decode_upgrade.rs index 39d598c..d007c1c 100644 --- a/tests/macro_decode_decode_upgrade.rs +++ b/tests/macro_decode_decode_upgrade.rs @@ -62,7 +62,7 @@ impl From for Foo2 { fn test_decode_foo1_to_foo2() { let foo1 = Foo1 { x: 100 }; let foo1_encoded = foo1.native_model_encode_body().unwrap(); - let foo2_decoded = Foo2::native_model_decode_upgrade_body(foo1_encoded, 1).unwrap(); + let foo2_decoded = Foo2::native_model_decode_upgrade_body(foo1_encoded, 1, 1).unwrap(); assert_eq!(foo1.x.to_string(), foo2_decoded.x); } @@ -72,7 +72,7 @@ fn test_decode_foo2_to_foo3() { x: "100".to_string(), }; let foo2_encoded = foo2.native_model_encode_body().unwrap(); - let foo3_decoded = Foo3::native_model_decode_upgrade_body(foo2_encoded, 2).unwrap(); + let foo3_decoded = Foo3::native_model_decode_upgrade_body(foo2_encoded, 1, 2).unwrap(); assert_eq!(Foo3::X(100), foo3_decoded); } @@ -80,7 +80,7 @@ fn test_decode_foo2_to_foo3() { fn test_decode_foo1_to_foo3() { let foo1 = Foo1 { x: 100 }; let foo1_encoded = foo1.native_model_encode_body().unwrap(); - let foo3_decoded = Foo3::native_model_decode_upgrade_body(foo1_encoded, 1).unwrap(); + let foo3_decoded = Foo3::native_model_decode_upgrade_body(foo1_encoded, 1, 1).unwrap(); assert_eq!(Foo3::X(100), foo3_decoded); } @@ -88,7 +88,7 @@ fn test_decode_foo1_to_foo3() { fn test_decode_foo1_to_foo1() { let foo1 = Foo1 { x: 100 }; let foo1_encoded = foo1.native_model_encode_body().unwrap(); - let foo1_decoded = Foo1::native_model_decode_upgrade_body(foo1_encoded, 1).unwrap(); + let foo1_decoded = Foo1::native_model_decode_upgrade_body(foo1_encoded, 1, 1).unwrap(); assert_eq!(foo1, foo1_decoded); } @@ -98,7 +98,7 @@ fn test_decode_foo2_to_foo2() { x: "100".to_string(), }; let foo2_encoded = foo2.native_model_encode_body().unwrap(); - let foo2_decoded = Foo2::native_model_decode_upgrade_body(foo2_encoded, 2).unwrap(); + let foo2_decoded = Foo2::native_model_decode_upgrade_body(foo2_encoded, 1, 2).unwrap(); assert_eq!(foo2, foo2_decoded); } @@ -106,7 +106,7 @@ fn test_decode_foo2_to_foo2() { fn test_decode_foo3_to_foo3() { let foo3 = Foo3::X(100); let foo3_encoded = foo3.native_model_encode_body().unwrap(); - let foo3_decoded = Foo3::native_model_decode_upgrade_body(foo3_encoded, 3).unwrap(); + let foo3_decoded = Foo3::native_model_decode_upgrade_body(foo3_encoded, 1, 3).unwrap(); assert_eq!(foo3, foo3_decoded); } @@ -114,7 +114,7 @@ fn test_decode_foo3_to_foo3() { fn test_should_fail_decode_foo3_to_foo2() { let foo3 = Foo3::X(100); let foo3_encoded = foo3.native_model_encode_body().unwrap(); - let foo3_decoded = Foo2::native_model_decode_upgrade_body(foo3_encoded, 3); + let foo3_decoded = 
Foo2::native_model_decode_upgrade_body(foo3_encoded, 1, 3); assert!(foo3_decoded.is_err()); assert!(matches!( foo3_decoded.unwrap_err(), @@ -126,7 +126,7 @@ fn test_should_fail_decode_foo3_to_foo2() { fn test_should_fail_decode_foo3_to_foo1() { let foo3 = Foo3::X(100); let foo3_encoded = foo3.native_model_encode_body().unwrap(); - let foo3_decoded = Foo1::native_model_decode_upgrade_body(foo3_encoded, 3); + let foo3_decoded = Foo1::native_model_decode_upgrade_body(foo3_encoded, 1, 3); assert!(foo3_decoded.is_err()); assert!(matches!( foo3_decoded.unwrap_err(), @@ -140,10 +140,32 @@ fn test_should_fail_decode_foo2_to_foo1() { x: "100".to_string(), }; let foo2_encoded = foo2.native_model_encode_body().unwrap(); - let foo2_decoded = Foo1::native_model_decode_upgrade_body(foo2_encoded, 2); + let foo2_decoded = Foo1::native_model_decode_upgrade_body(foo2_encoded, 1, 2); assert!(foo2_decoded.is_err()); assert!(matches!( foo2_decoded.unwrap_err(), native_model::Error::UpgradeNotSupported { from: 2, to: 1 } )); } + +#[derive(Debug, Encode, Decode, PartialEq)] +#[native_model(id = 2, version = 1)] +struct Foo1Bis { + x: i32, +} + +#[test] +fn test_prevent_to_decode_the_wrong_model() { + let foo1 = Foo1 { x: 100 }; + let foo1_encoded = foo1.native_model_encode_body().unwrap(); + let foo1_decoded = Foo1Bis::native_model_decode_upgrade_body(foo1_encoded, 1, 1); + dbg!(&foo1_decoded); + // assert!(foo1_decoded.is_err()); + // assert!(matches!( + // foo1_decoded.unwrap_err(), + // native_model::Error::TypeIdMismatch { + // expected: 1, + // actual: 1 + // } + // )); +}
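+
+// A sketch, not part of the original patch: the new id guard can also be asserted directly at the
+// `native_model_decode_body` level, where the error type is `native_model::DecodeBodyError`.
+#[test]
+fn test_decode_body_rejects_mismatched_id() {
+    let foo1 = Foo1 { x: 100 };
+    let foo1_encoded = foo1.native_model_encode_body().unwrap();
+    // `Foo1Bis` is declared with `id = 2`, but these bytes are decoded with `id = 1`.
+    let result = Foo1Bis::native_model_decode_body(foo1_encoded, 1);
+    assert!(matches!(
+        result.unwrap_err(),
+        native_model::DecodeBodyError::MismatchedModelId
+    ));
+}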