From b2c8c5a12879cf8421e2ab5c18a074bf26746ec9 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Wed, 6 Jan 2021 13:39:59 +0100 Subject: [PATCH 1/9] adds the new command AuthenticatorLargeBlobs --- src/ctap/command.rs | 236 ++++++++++++++++- src/ctap/credential_management.rs | 2 +- src/ctap/large_blobs.rs | 422 ++++++++++++++++++++++++++++++ src/ctap/mod.rs | 12 +- src/ctap/pin_protocol_v1.rs | 2 +- src/ctap/response.rs | 35 +++ src/ctap/storage.rs | 209 +++++++++++++++ src/ctap/storage/key.rs | 5 + 8 files changed, 914 insertions(+), 9 deletions(-) create mode 100644 src/ctap/large_blobs.rs diff --git a/src/ctap/command.rs b/src/ctap/command.rs index 128c4b49..2e1fe3b7 100644 --- a/src/ctap/command.rs +++ b/src/ctap/command.rs @@ -22,6 +22,7 @@ use super::data_formats::{ }; use super::key_material; use super::status_code::Ctap2StatusCode; +use super::storage::MAX_LARGE_BLOB_ARRAY_SIZE; use alloc::string::String; use alloc::vec::Vec; use arrayref::array_ref; @@ -33,6 +34,9 @@ use core::convert::TryFrom; // You might also want to set the max credential size in process_get_info then. pub const MAX_CREDENTIAL_COUNT_IN_LIST: Option = None; +// This constant is a consequence of the structure of messages. +const MIN_LARGE_BLOB_LEN: usize = 17; + // CTAP specification (version 20190130) section 6.1 #[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] pub enum Command { @@ -44,8 +48,8 @@ pub enum Command { AuthenticatorGetNextAssertion, AuthenticatorCredentialManagement(AuthenticatorCredentialManagementParameters), AuthenticatorSelection, + AuthenticatorLargeBlobs(AuthenticatorLargeBlobsParameters), AuthenticatorConfig(AuthenticatorConfigParameters), - // TODO(kaczmarczyck) implement FIDO 2.1 commands (see below consts) // Vendor specific commands AuthenticatorVendorConfigure(AuthenticatorVendorConfigureParameters), } @@ -56,8 +60,6 @@ impl From for Ctap2StatusCode { } } -// TODO: Remove this `allow(dead_code)` once the constants are used. 
-#[allow(dead_code)] impl Command { const AUTHENTICATOR_MAKE_CREDENTIAL: u8 = 0x01; const AUTHENTICATOR_GET_ASSERTION: u8 = 0x02; @@ -65,8 +67,8 @@ impl Command { const AUTHENTICATOR_CLIENT_PIN: u8 = 0x06; const AUTHENTICATOR_RESET: u8 = 0x07; const AUTHENTICATOR_GET_NEXT_ASSERTION: u8 = 0x08; - // TODO(kaczmarczyck) use or remove those constants - const AUTHENTICATOR_BIO_ENROLLMENT: u8 = 0x09; + // Implement Bio Enrollment when your hardware supports biometrics. + const _AUTHENTICATOR_BIO_ENROLLMENT: u8 = 0x09; const AUTHENTICATOR_CREDENTIAL_MANAGEMENT: u8 = 0x0A; const AUTHENTICATOR_SELECTION: u8 = 0x0B; const AUTHENTICATOR_LARGE_BLOBS: u8 = 0x0C; @@ -123,6 +125,12 @@ impl Command { // Parameters are ignored. Ok(Command::AuthenticatorSelection) } + Command::AUTHENTICATOR_LARGE_BLOBS => { + let decoded_cbor = cbor::read(&bytes[1..])?; + Ok(Command::AuthenticatorLargeBlobs( + AuthenticatorLargeBlobsParameters::try_from(decoded_cbor)?, + )) + } Command::AUTHENTICATOR_CONFIG => { let decoded_cbor = cbor::read(&bytes[1..])?; Ok(Command::AuthenticatorConfig( @@ -351,6 +359,81 @@ impl TryFrom for AuthenticatorClientPinParameters { } } +#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] +pub struct AuthenticatorLargeBlobsParameters { + pub get: Option, + pub set: Option>, + pub offset: usize, + pub length: Option, + pub pin_uv_auth_param: Option>, + pub pin_uv_auth_protocol: Option, +} + +impl TryFrom for AuthenticatorLargeBlobsParameters { + type Error = Ctap2StatusCode; + + fn try_from(cbor_value: cbor::Value) -> Result { + destructure_cbor_map! 
{ + let { + 1 => get, + 2 => set, + 3 => offset, + 4 => length, + 5 => pin_uv_auth_param, + 6 => pin_uv_auth_protocol, + } = extract_map(cbor_value)?; + } + + // careful: some missing parameters here are CTAP1_ERR_INVALID_PARAMETER + let get = get.map(extract_unsigned).transpose()?.map(|u| u as usize); + let set = set.map(extract_byte_string).transpose()?; + let offset = + extract_unsigned(offset.ok_or(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)?)? as usize; + let length = length + .map(extract_unsigned) + .transpose()? + .map(|u| u as usize); + let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?; + let pin_uv_auth_protocol = pin_uv_auth_protocol.map(extract_unsigned).transpose()?; + + if get.is_none() && set.is_none() { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER); + } + if get.is_some() && set.is_some() { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER); + } + if get.is_some() + && (length.is_some() || pin_uv_auth_param.is_some() || pin_uv_auth_protocol.is_some()) + { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER); + } + if set.is_some() && offset == 0 { + match length { + None => return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER), + Some(len) if len > MAX_LARGE_BLOB_ARRAY_SIZE => { + return Err(Ctap2StatusCode::CTAP2_ERR_LARGE_BLOB_STORAGE_FULL) + } + Some(len) if len < MIN_LARGE_BLOB_LEN => { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + } + Some(_) => (), + } + } + if set.is_some() && offset != 0 && length.is_some() { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER); + } + + Ok(AuthenticatorLargeBlobsParameters { + get, + set, + offset, + length, + pin_uv_auth_param, + pin_uv_auth_protocol, + }) + } +} + #[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] pub struct AuthenticatorConfigParameters { pub sub_command: ConfigSubCommand, @@ -698,6 +781,149 @@ mod test { assert_eq!(command, Ok(Command::AuthenticatorSelection)); } + #[test] + fn 
test_from_cbor_large_blobs_parameters() { + // successful get + let cbor_value = cbor_map! { + 1 => 2, + 3 => 4, + }; + let returned_large_blobs_parameters = + AuthenticatorLargeBlobsParameters::try_from(cbor_value).unwrap(); + let expected_large_blobs_parameters = AuthenticatorLargeBlobsParameters { + get: Some(2), + set: None, + offset: 4, + length: None, + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + assert_eq!( + returned_large_blobs_parameters, + expected_large_blobs_parameters + ); + + // successful first set + let cbor_value = cbor_map! { + 2 => vec! [0x5E], + 3 => 0, + 4 => MIN_LARGE_BLOB_LEN as u64, + 5 => vec! [0xA9], + 6 => 1, + }; + let returned_large_blobs_parameters = + AuthenticatorLargeBlobsParameters::try_from(cbor_value).unwrap(); + let expected_large_blobs_parameters = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(vec![0x5E]), + offset: 0, + length: Some(MIN_LARGE_BLOB_LEN), + pin_uv_auth_param: Some(vec![0xA9]), + pin_uv_auth_protocol: Some(1), + }; + assert_eq!( + returned_large_blobs_parameters, + expected_large_blobs_parameters + ); + + // successful next set + let cbor_value = cbor_map! { + 2 => vec! [0x5E], + 3 => 1, + 5 => vec! [0xA9], + 6 => 1, + }; + let returned_large_blobs_parameters = + AuthenticatorLargeBlobsParameters::try_from(cbor_value).unwrap(); + let expected_large_blobs_parameters = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(vec![0x5E]), + offset: 1, + length: None, + pin_uv_auth_param: Some(vec![0xA9]), + pin_uv_auth_protocol: Some(1), + }; + assert_eq!( + returned_large_blobs_parameters, + expected_large_blobs_parameters + ); + + // failing with neither get nor set + let cbor_value = cbor_map! { + 3 => 4, + 5 => vec! [0xA9], + 6 => 1, + }; + assert_eq!( + AuthenticatorLargeBlobsParameters::try_from(cbor_value), + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + ); + + // failing with get and set + let cbor_value = cbor_map! { + 1 => 2, + 2 => vec! 
[0x5E], + 3 => 4, + 5 => vec! [0xA9], + 6 => 1, + }; + assert_eq!( + AuthenticatorLargeBlobsParameters::try_from(cbor_value), + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + ); + + // failing with get and length + let cbor_value = cbor_map! { + 1 => 2, + 3 => 4, + 4 => MIN_LARGE_BLOB_LEN as u64, + 5 => vec! [0xA9], + 6 => 1, + }; + assert_eq!( + AuthenticatorLargeBlobsParameters::try_from(cbor_value), + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + ); + + // failing with zero offset and no length present + let cbor_value = cbor_map! { + 2 => vec! [0x5E], + 3 => 0, + 5 => vec! [0xA9], + 6 => 1, + }; + assert_eq!( + AuthenticatorLargeBlobsParameters::try_from(cbor_value), + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + ); + + // failing with length smaller than minimum + let cbor_value = cbor_map! { + 2 => vec! [0x5E], + 3 => 0, + 4 => MIN_LARGE_BLOB_LEN as u64 - 1, + 5 => vec! [0xA9], + 6 => 1, + }; + assert_eq!( + AuthenticatorLargeBlobsParameters::try_from(cbor_value), + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + ); + + // failing with non-zero offset and length present + let cbor_value = cbor_map! { + 2 => vec! [0x5E], + 3 => 4, + 4 => MIN_LARGE_BLOB_LEN as u64, + 5 => vec! 
[0xA9], + 6 => 1, + }; + assert_eq!( + AuthenticatorLargeBlobsParameters::try_from(cbor_value), + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + ); + } + #[test] fn test_vendor_configure() { // Incomplete command diff --git a/src/ctap/credential_management.rs b/src/ctap/credential_management.rs index 7681fa12..7665ea78 100644 --- a/src/ctap/credential_management.rs +++ b/src/ctap/credential_management.rs @@ -100,7 +100,7 @@ fn enumerate_credentials_response( public_key: Some(public_key), total_credentials, cred_protect: cred_protect_policy, - // TODO(kaczmarczyck) add when largeBlobKey is implemented + // TODO(kaczmarczyck) add when largeBlobKey extension is implemented large_blob_key: None, ..Default::default() }) diff --git a/src/ctap/large_blobs.rs b/src/ctap/large_blobs.rs new file mode 100644 index 00000000..32934e93 --- /dev/null +++ b/src/ctap/large_blobs.rs @@ -0,0 +1,422 @@ +// Copyright 2020-2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+use super::check_pin_uv_auth_protocol;
+use super::command::AuthenticatorLargeBlobsParameters;
+use super::pin_protocol_v1::{PinPermission, PinProtocolV1};
+use super::response::{AuthenticatorLargeBlobsResponse, ResponseData};
+use super::status_code::Ctap2StatusCode;
+use super::storage::PersistentStore;
+use alloc::vec;
+use alloc::vec::Vec;
+use byteorder::{ByteOrder, LittleEndian};
+use crypto::sha256::Sha256;
+use crypto::Hash256;
+
+/// This is the maximum message size supported by the authenticator. 1024 is the default.
+/// Increasing this value can speed up commands with longer responses, but may lead to
+/// packets dropping or unexpected failures.
+pub const MAX_MSG_SIZE: usize = 1024;
+/// The length of the truncated hash that is appended to the large blob data.
+const TRUNCATED_HASH_LEN: usize = 16;
+
+pub struct LargeBlobs {
+    buffer: Vec<u8>,
+    expected_length: usize,
+    expected_next_offset: usize,
+}
+
+/// Implements the logic for the AuthenticatorLargeBlobs command and keeps its state.
+impl LargeBlobs {
+    pub fn new() -> LargeBlobs {
+        LargeBlobs {
+            buffer: Vec::new(),
+            expected_length: 0,
+            expected_next_offset: 0,
+        }
+    }
+
+    /// Process the large blob command.
+ pub fn process_command( + &mut self, + persistent_store: &mut PersistentStore, + pin_protocol_v1: &mut PinProtocolV1, + large_blobs_params: AuthenticatorLargeBlobsParameters, + ) -> Result { + let AuthenticatorLargeBlobsParameters { + get, + set, + offset, + length, + pin_uv_auth_param, + pin_uv_auth_protocol, + } = large_blobs_params; + + const MAX_FRAGMENT_LENGTH: usize = MAX_MSG_SIZE - 64; + + if let Some(get) = get { + if get > MAX_FRAGMENT_LENGTH { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_LENGTH); + } + let config = persistent_store.get_large_blob_array(get, offset)?; + return Ok(ResponseData::AuthenticatorLargeBlobs(Some( + AuthenticatorLargeBlobsResponse { config }, + ))); + } + + if let Some(mut set) = set { + if set.len() > MAX_FRAGMENT_LENGTH { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_LENGTH); + } + if offset == 0 { + // Checks for offset and length are already done in command. + self.expected_length = + length.ok_or(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)?; + self.expected_next_offset = 0; + } + if offset != self.expected_next_offset { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_SEQ); + } + if persistent_store.pin_hash()?.is_some() { + let pin_uv_auth_param = + pin_uv_auth_param.ok_or(Ctap2StatusCode::CTAP2_ERR_PUAT_REQUIRED)?; + // TODO(kaczmarczyck) Error codes for PIN protocol differ across commands. + // Change to Ctap2StatusCode::CTAP2_ERR_PUAT_REQUIRED for None? 
+ check_pin_uv_auth_protocol(pin_uv_auth_protocol)?; + pin_protocol_v1.has_permission(PinPermission::LargeBlobWrite)?; + let mut message = vec![0xFF; 32]; + message.extend(&[0x0C, 0x00]); + let mut offset_bytes = [0u8; 4]; + LittleEndian::write_u32(&mut offset_bytes, offset as u32); + message.extend(&offset_bytes); + message.extend(&Sha256::hash(set.as_slice())); + if !pin_protocol_v1.verify_pin_auth_token(&message, &pin_uv_auth_param) { + return Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID); + } + } + if offset + set.len() > self.expected_length { + return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER); + } + if offset == 0 { + self.buffer = Vec::with_capacity(self.expected_length); + } + self.buffer.append(&mut set); + self.expected_next_offset = self.buffer.len(); + if self.expected_next_offset == self.expected_length { + self.expected_length = 0; + self.expected_next_offset = 0; + // Must be a positive number. + let buffer_hash_index = self.buffer.len() - TRUNCATED_HASH_LEN; + if Sha256::hash(&self.buffer[..buffer_hash_index])[..TRUNCATED_HASH_LEN] + != self.buffer[buffer_hash_index..] + { + self.buffer = Vec::new(); + return Err(Ctap2StatusCode::CTAP2_ERR_INTEGRITY_FAILURE); + } + persistent_store.commit_large_blob_array(&self.buffer)?; + self.buffer = Vec::new(); + } + return Ok(ResponseData::AuthenticatorLargeBlobs(None)); + } + + // This should be unreachable, since the command has either get or set. 
+ Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crypto::rng256::ThreadRng256; + + #[test] + fn test_process_command_get_empty() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng); + let pin_uv_auth_token = [0x55; 32]; + let mut pin_protocol_v1 = PinProtocolV1::new_test(key_agreement_key, pin_uv_auth_token); + let mut large_blobs = LargeBlobs::new(); + + let large_blob = vec![ + 0x80, 0x76, 0xbe, 0x8b, 0x52, 0x8d, 0x00, 0x75, 0xf7, 0xaa, 0xe9, 0x8d, 0x6f, 0xa5, + 0x7a, 0x6d, 0x3c, + ]; + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: Some(large_blob.len()), + set: None, + offset: 0, + length: None, + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + match large_blobs_response.unwrap() { + ResponseData::AuthenticatorLargeBlobs(Some(response)) => { + assert_eq!(response.config, large_blob); + } + _ => panic!("Invalid response type"), + }; + } + + #[test] + fn test_process_command_commit_and_get() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng); + let pin_uv_auth_token = [0x55; 32]; + let mut pin_protocol_v1 = PinProtocolV1::new_test(key_agreement_key, pin_uv_auth_token); + let mut large_blobs = LargeBlobs::new(); + + const BLOB_LEN: usize = 200; + const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN; + let mut large_blob = vec![0x1B; DATA_LEN]; + large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob[..BLOB_LEN / 2].to_vec()), + offset: 0, + length: Some(BLOB_LEN), + pin_uv_auth_param: None, + 
pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Ok(ResponseData::AuthenticatorLargeBlobs(None)) + ); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob[BLOB_LEN / 2..].to_vec()), + offset: BLOB_LEN / 2, + length: None, + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Ok(ResponseData::AuthenticatorLargeBlobs(None)) + ); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: Some(BLOB_LEN), + set: None, + offset: 0, + length: None, + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + match large_blobs_response.unwrap() { + ResponseData::AuthenticatorLargeBlobs(Some(response)) => { + assert_eq!(response.config, large_blob); + } + _ => panic!("Invalid response type"), + }; + } + + #[test] + fn test_process_command_commit_unexpected_offset() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng); + let pin_uv_auth_token = [0x55; 32]; + let mut pin_protocol_v1 = PinProtocolV1::new_test(key_agreement_key, pin_uv_auth_token); + let mut large_blobs = LargeBlobs::new(); + + const BLOB_LEN: usize = 200; + const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN; + let mut large_blob = vec![0x1B; DATA_LEN]; + large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob[..BLOB_LEN / 2].to_vec()), + offset: 0, 
+ length: Some(BLOB_LEN), + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Ok(ResponseData::AuthenticatorLargeBlobs(None)) + ); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob[BLOB_LEN / 2..].to_vec()), + // The offset is 1 too big. + offset: BLOB_LEN / 2 + 1, + length: None, + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_SEQ), + ); + } + + #[test] + fn test_process_command_commit_unexpected_length() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng); + let pin_uv_auth_token = [0x55; 32]; + let mut pin_protocol_v1 = PinProtocolV1::new_test(key_agreement_key, pin_uv_auth_token); + let mut large_blobs = LargeBlobs::new(); + + const BLOB_LEN: usize = 200; + const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN; + let mut large_blob = vec![0x1B; DATA_LEN]; + large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob[..BLOB_LEN / 2].to_vec()), + offset: 0, + // The length is 1 too small. 
+ length: Some(BLOB_LEN - 1), + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Ok(ResponseData::AuthenticatorLargeBlobs(None)) + ); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob[BLOB_LEN / 2..].to_vec()), + offset: BLOB_LEN / 2, + length: None, + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER), + ); + } + + #[test] + fn test_process_command_commit_unexpected_hash() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng); + let pin_uv_auth_token = [0x55; 32]; + let mut pin_protocol_v1 = PinProtocolV1::new_test(key_agreement_key, pin_uv_auth_token); + let mut large_blobs = LargeBlobs::new(); + + const BLOB_LEN: usize = 20; + // This blob does not have an appropriate hash. 
+ let large_blob = vec![0x1B; BLOB_LEN]; + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob.to_vec()), + offset: 0, + length: Some(BLOB_LEN), + pin_uv_auth_param: None, + pin_uv_auth_protocol: None, + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Err(Ctap2StatusCode::CTAP2_ERR_INTEGRITY_FAILURE), + ); + } + + #[test] + fn test_process_command_commit_with_pin() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng); + let pin_uv_auth_token = [0x55; 32]; + let mut pin_protocol_v1 = PinProtocolV1::new_test(key_agreement_key, pin_uv_auth_token); + let mut large_blobs = LargeBlobs::new(); + + const BLOB_LEN: usize = 20; + const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN; + let mut large_blob = vec![0x1B; DATA_LEN]; + large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]); + + persistent_store.set_pin(&[0u8; 16], 4).unwrap(); + let pin_uv_auth_param = Some(vec![ + 0x68, 0x0C, 0x3F, 0x6A, 0x62, 0x47, 0xE6, 0x7C, 0x23, 0x1F, 0x79, 0xE3, 0xDC, 0x6D, + 0xC3, 0xDE, + ]); + + let large_blobs_params = AuthenticatorLargeBlobsParameters { + get: None, + set: Some(large_blob), + offset: 0, + length: Some(BLOB_LEN), + pin_uv_auth_param, + pin_uv_auth_protocol: Some(1), + }; + let large_blobs_response = large_blobs.process_command( + &mut persistent_store, + &mut pin_protocol_v1, + large_blobs_params, + ); + assert_eq!( + large_blobs_response, + Ok(ResponseData::AuthenticatorLargeBlobs(None)) + ); + } +} diff --git a/src/ctap/mod.rs b/src/ctap/mod.rs index 6f105893..95b5b0a1 100644 --- a/src/ctap/mod.rs +++ b/src/ctap/mod.rs @@ -21,6 +21,7 @@ mod ctap1; pub mod data_formats; pub mod hid; mod key_material; +mod large_blobs; mod pin_protocol_v1; pub mod response; pub mod 
status_code; @@ -41,6 +42,7 @@ use self::data_formats::{ SignatureAlgorithm, }; use self::hid::ChannelID; +use self::large_blobs::{LargeBlobs, MAX_MSG_SIZE}; use self::pin_protocol_v1::{PinPermission, PinProtocolV1}; use self::response::{ AuthenticatorGetAssertionResponse, AuthenticatorGetInfoResponse, @@ -293,6 +295,7 @@ pub struct CtapState<'a, R: Rng256, CheckUserPresence: Fn(ChannelID) -> Result<( pub u2f_up_state: U2fUserPresenceState, // The state initializes to Reset and its timeout, and never goes back to Reset. stateful_command_permission: StatefulPermission, + large_blobs: LargeBlobs, } impl<'a, R, CheckUserPresence> CtapState<'a, R, CheckUserPresence> @@ -318,6 +321,7 @@ where Duration::from_ms(TOUCH_TIMEOUT_MS), ), stateful_command_permission: StatefulPermission::new_reset(now), + large_blobs: LargeBlobs::new(), } } @@ -484,12 +488,16 @@ where ) } Command::AuthenticatorSelection => self.process_selection(cid), + Command::AuthenticatorLargeBlobs(params) => self.large_blobs.process_command( + &mut self.persistent_store, + &mut self.pin_protocol_v1, + params, + ), Command::AuthenticatorConfig(params) => process_config( &mut self.persistent_store, &mut self.pin_protocol_v1, params, ), - // TODO(kaczmarczyck) implement FIDO 2.1 commands // Vendor specific commands Command::AuthenticatorVendorConfigure(params) => { self.process_vendor_configure(params, cid) @@ -1026,7 +1034,7 @@ where ]), aaguid: self.persistent_store.aaguid()?, options: Some(options_map), - max_msg_size: Some(1024), + max_msg_size: Some(MAX_MSG_SIZE as u64), pin_protocols: Some(vec![PIN_PROTOCOL_VERSION]), max_credential_count_in_list: MAX_CREDENTIAL_COUNT_IN_LIST.map(|c| c as u64), max_credential_id_length: Some(CREDENTIAL_ID_SIZE as u64), diff --git a/src/ctap/pin_protocol_v1.rs b/src/ctap/pin_protocol_v1.rs index eb537f0a..6ec5644e 100644 --- a/src/ctap/pin_protocol_v1.rs +++ b/src/ctap/pin_protocol_v1.rs @@ -162,7 +162,7 @@ pub enum PinPermission { GetAssertion = 0x02, 
CredentialManagement = 0x04, BioEnrollment = 0x08, - PlatformConfiguration = 0x10, + LargeBlobWrite = 0x10, AuthenticatorConfiguration = 0x20, } diff --git a/src/ctap/response.rs b/src/ctap/response.rs index e4cda5ec..245218f5 100644 --- a/src/ctap/response.rs +++ b/src/ctap/response.rs @@ -33,6 +33,7 @@ pub enum ResponseData { AuthenticatorReset, AuthenticatorCredentialManagement(Option), AuthenticatorSelection, + AuthenticatorLargeBlobs(Option), // TODO(kaczmarczyck) dummy, extend AuthenticatorConfig, AuthenticatorVendor(AuthenticatorVendorResponse), @@ -49,6 +50,7 @@ impl From for Option { ResponseData::AuthenticatorReset => None, ResponseData::AuthenticatorCredentialManagement(data) => data.map(|d| d.into()), ResponseData::AuthenticatorSelection => None, + ResponseData::AuthenticatorLargeBlobs(data) => data.map(|d| d.into()), ResponseData::AuthenticatorConfig => None, ResponseData::AuthenticatorVendor(data) => Some(data.into()), } @@ -204,6 +206,22 @@ impl From for cbor::Value { } } +#[cfg_attr(test, derive(PartialEq))] +#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))] +pub struct AuthenticatorLargeBlobsResponse { + pub config: Vec, +} + +impl From for cbor::Value { + fn from(platform_large_blobs_response: AuthenticatorLargeBlobsResponse) -> Self { + let AuthenticatorLargeBlobsResponse { config } = platform_large_blobs_response; + + cbor_map_options! { + 0x01 => config, + } + } +} + #[derive(Default)] #[cfg_attr(test, derive(PartialEq))] #[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))] @@ -510,6 +528,23 @@ mod test { assert_eq!(response_cbor, None); } + #[test] + fn test_large_blobs_into_cbor() { + let large_blobs_response = AuthenticatorLargeBlobsResponse { config: vec![0xC0] }; + let response_cbor: Option = + ResponseData::AuthenticatorLargeBlobs(Some(large_blobs_response)).into(); + let expected_cbor = cbor_map_options! 
{ + 0x01 => vec![0xC0], + }; + assert_eq!(response_cbor, Some(expected_cbor)); + } + + #[test] + fn test_empty_large_blobs_into_cbor() { + let response_cbor: Option = ResponseData::AuthenticatorLargeBlobs(None).into(); + assert_eq!(response_cbor, None); + } + #[test] fn test_config_into_cbor() { let response_cbor: Option = ResponseData::AuthenticatorConfig.into(); diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index b85f918c..a89c02d9 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -28,6 +28,7 @@ use alloc::vec; use alloc::vec::Vec; use arrayref::array_ref; use cbor::cbor_array_vec; +use core::cmp; use core::convert::TryInto; use crypto::rng256::Rng256; use persistent_store::StoreUpdate; @@ -59,6 +60,9 @@ const DEFAULT_MIN_PIN_LENGTH_RP_IDS: Vec = Vec::new(); // This constant is an attempt to limit storage requirements. If you don't set it to 0, // the stored strings can still be unbounded, but that is true for all RP IDs. pub const MAX_RP_IDS_LENGTH: usize = 8; +const SHARD_SIZE: usize = 128; +pub const MAX_LARGE_BLOB_ARRAY_SIZE: usize = + SHARD_SIZE * (key::LARGE_BLOB_SHARDS.end - key::LARGE_BLOB_SHARDS.start); /// Wrapper for master keys. pub struct MasterKeys { @@ -467,6 +471,70 @@ impl PersistentStore { )?) } + /// Reads the byte vector stored as the serialized large blobs array. + /// + /// If more data is requested than stored, return as many bytes as possible. 
+ pub fn get_large_blob_array( + &self, + mut byte_count: usize, + mut offset: usize, + ) -> Result, Ctap2StatusCode> { + if self.store.find(key::LARGE_BLOB_SHARDS.start)?.is_none() { + return Ok(vec![ + 0x80, 0x76, 0xbe, 0x8b, 0x52, 0x8d, 0x00, 0x75, 0xf7, 0xaa, 0xe9, 0x8d, 0x6f, 0xa5, + 0x7a, 0x6d, 0x3c, + ]); + } + let mut output = Vec::with_capacity(byte_count); + while byte_count > 0 { + let shard = offset / SHARD_SIZE; + let shard_offset = offset % SHARD_SIZE; + let shard_length = cmp::min(SHARD_SIZE - shard_offset, byte_count); + + let shard_key = key::LARGE_BLOB_SHARDS.start + shard; + if !key::LARGE_BLOB_SHARDS.contains(&shard_key) { + // This request should have been caught at application level. + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + } + let shard_entry = self.store.find(shard_key)?.unwrap_or_default(); + if shard_entry.len() < shard_offset + shard_length { + output.extend(&shard_entry[..]); + return Ok(output); + } + output.extend(&shard_entry[shard_offset..shard_offset + shard_length]); + offset += shard_length; + byte_count -= shard_length; + } + Ok(output) + } + + /// Sets a byte vector as the serialized large blobs array. + pub fn commit_large_blob_array( + &mut self, + large_blob_array: &[u8], + ) -> Result<(), Ctap2StatusCode> { + let mut large_blob_index = 0; + let mut shard_key = key::LARGE_BLOB_SHARDS.start; + while large_blob_index < large_blob_array.len() { + if !key::LARGE_BLOB_SHARDS.contains(&shard_key) { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + } + let shard_length = cmp::min(SHARD_SIZE, large_blob_array.len() - large_blob_index); + self.store.insert( + shard_key, + &large_blob_array[large_blob_index..large_blob_index + shard_length], + )?; + large_blob_index += shard_length; + shard_key += 1; + } + // The length is not stored, so overwrite old entries explicitly. + for key in shard_key..key::LARGE_BLOB_SHARDS.end { + // Assuming the store optimizes out unnecessary writes. 
+ self.store.remove(key)?; + } + Ok(()) + } + /// Returns the attestation private key if defined. pub fn attestation_private_key( &self, @@ -1144,6 +1212,147 @@ mod test { assert_eq!(persistent_store.min_pin_length_rp_ids().unwrap(), rp_ids); } + #[test] + #[allow(clippy::assertions_on_constants)] + fn test_max_large_blob_array_size() { + assert!(MAX_LARGE_BLOB_ARRAY_SIZE >= 1024); + } + + #[test] + fn test_commit_get_large_blob_array_1_shard() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + + let large_blob_array = vec![0xC0; 1]; + assert!(persistent_store + .commit_large_blob_array(&large_blob_array) + .is_ok()); + let restored_large_blob_array = persistent_store.get_large_blob_array(1, 0).unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + + let large_blob_array = vec![0xC0; SHARD_SIZE]; + assert!(persistent_store + .commit_large_blob_array(&large_blob_array) + .is_ok()); + let restored_large_blob_array = persistent_store + .get_large_blob_array(SHARD_SIZE, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + let restored_large_blob_array = persistent_store + .get_large_blob_array(SHARD_SIZE + 1, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + } + + #[test] + fn test_commit_get_large_blob_array_2_shards() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + + let large_blob_array = vec![0xC0; SHARD_SIZE + 1]; + assert!(persistent_store + .commit_large_blob_array(&large_blob_array) + .is_ok()); + let restored_large_blob_array = persistent_store + .get_large_blob_array(SHARD_SIZE, 0) + .unwrap(); + assert_eq!( + large_blob_array[..SHARD_SIZE], + restored_large_blob_array[..] 
+ ); + let restored_large_blob_array = persistent_store + .get_large_blob_array(SHARD_SIZE + 1, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + + let large_blob_array = vec![0xC0; 2 * SHARD_SIZE]; + assert!(persistent_store + .commit_large_blob_array(&large_blob_array) + .is_ok()); + let restored_large_blob_array = persistent_store + .get_large_blob_array(2 * SHARD_SIZE, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + let restored_large_blob_array = persistent_store + .get_large_blob_array(2 * SHARD_SIZE + 1, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + } + + #[test] + fn test_commit_get_large_blob_array_3_shards() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + + let mut large_blob_array = vec![0x11; SHARD_SIZE]; + large_blob_array.extend([0x22; SHARD_SIZE].iter()); + large_blob_array.extend([0x33; 1].iter()); + assert!(persistent_store + .commit_large_blob_array(&large_blob_array) + .is_ok()); + let restored_large_blob_array = persistent_store + .get_large_blob_array(2 * SHARD_SIZE + 1, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + let restored_large_blob_array = persistent_store + .get_large_blob_array(3 * SHARD_SIZE, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + let shard1 = persistent_store + .get_large_blob_array(SHARD_SIZE, 0) + .unwrap(); + let shard2 = persistent_store + .get_large_blob_array(SHARD_SIZE, SHARD_SIZE) + .unwrap(); + let shard3 = persistent_store + .get_large_blob_array(1, 2 * SHARD_SIZE) + .unwrap(); + assert_eq!(large_blob_array[..SHARD_SIZE], shard1[..]); + assert_eq!(large_blob_array[SHARD_SIZE..2 * SHARD_SIZE], shard2[..]); + assert_eq!(large_blob_array[2 * SHARD_SIZE..], shard3[..]); + let shard12 = persistent_store + .get_large_blob_array(2, SHARD_SIZE - 1) + .unwrap(); + let shard23 = persistent_store + .get_large_blob_array(2, 2 * 
SHARD_SIZE - 1) + .unwrap(); + assert_eq!(vec![0x11, 0x22], shard12); + assert_eq!(vec![0x22, 0x33], shard23); + } + + #[test] + fn test_commit_get_large_blob_array_overwrite() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + + let large_blob_array = vec![0x11; SHARD_SIZE + 1]; + assert!(persistent_store + .commit_large_blob_array(&large_blob_array) + .is_ok()); + let large_blob_array = vec![0x22; SHARD_SIZE]; + assert!(persistent_store + .commit_large_blob_array(&large_blob_array) + .is_ok()); + let restored_large_blob_array = persistent_store + .get_large_blob_array(SHARD_SIZE + 1, 0) + .unwrap(); + assert_eq!(large_blob_array, restored_large_blob_array); + let restored_large_blob_array = persistent_store + .get_large_blob_array(1, SHARD_SIZE) + .unwrap(); + assert_eq!(Vec::::new(), restored_large_blob_array); + + assert!(persistent_store.commit_large_blob_array(&[]).is_ok()); + let restored_large_blob_array = persistent_store + .get_large_blob_array(SHARD_SIZE + 1, 0) + .unwrap(); + let empty_blob_array = vec![ + 0x80, 0x76, 0xbe, 0x8b, 0x52, 0x8d, 0x00, 0x75, 0xf7, 0xaa, 0xe9, 0x8d, 0x6f, 0xa5, + 0x7a, 0x6d, 0x3c, + ]; + assert_eq!(empty_blob_array, restored_large_blob_array); + } + #[test] fn test_global_signature_counter() { let mut rng = ThreadRng256 {}; diff --git a/src/ctap/storage/key.rs b/src/ctap/storage/key.rs index 1c0e21ed..4f6ba513 100644 --- a/src/ctap/storage/key.rs +++ b/src/ctap/storage/key.rs @@ -88,6 +88,11 @@ make_partition! { /// board may configure `MAX_SUPPORTED_RESIDENT_KEYS` depending on the storage size. CREDENTIALS = 1700..2000; + /// Storage for the serialized large blob array. + /// + /// The stored large blob can be too big for one key, so it has to be sharded. + LARGE_BLOB_SHARDS = 2000..2016; + /// If this entry exists and equals 1, the PIN needs to be changed. 
FORCE_PIN_CHANGE = 2040; From 3517b1163d9e76243923153d743f33c8977cc15f Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Fri, 22 Jan 2021 13:48:27 +0100 Subject: [PATCH 2/9] bigger shards, fixed get_large_blob --- src/ctap/large_blobs.rs | 4 ++-- src/ctap/storage.rs | 27 +++++++++++++++++---------- src/ctap/storage/key.rs | 2 +- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/ctap/large_blobs.rs b/src/ctap/large_blobs.rs index 32934e93..6dc80ce6 100644 --- a/src/ctap/large_blobs.rs +++ b/src/ctap/large_blobs.rs @@ -150,8 +150,8 @@ mod test { let mut large_blobs = LargeBlobs::new(); let large_blob = vec![ - 0x80, 0x76, 0xbe, 0x8b, 0x52, 0x8d, 0x00, 0x75, 0xf7, 0xaa, 0xe9, 0x8d, 0x6f, 0xa5, - 0x7a, 0x6d, 0x3c, + 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5, + 0x7A, 0x6D, 0x3C, ]; let large_blobs_params = AuthenticatorLargeBlobsParameters { get: Some(large_blob.len()), diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index a89c02d9..9b6ce313 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -60,7 +60,7 @@ const DEFAULT_MIN_PIN_LENGTH_RP_IDS: Vec = Vec::new(); // This constant is an attempt to limit storage requirements. If you don't set it to 0, // the stored strings can still be unbounded, but that is true for all RP IDs. pub const MAX_RP_IDS_LENGTH: usize = 8; -const SHARD_SIZE: usize = 128; +const SHARD_SIZE: usize = 1023; pub const MAX_LARGE_BLOB_ARRAY_SIZE: usize = SHARD_SIZE * (key::LARGE_BLOB_SHARDS.end - key::LARGE_BLOB_SHARDS.start); @@ -473,7 +473,8 @@ impl PersistentStore { /// Reads the byte vector stored as the serialized large blobs array. /// - /// If more data is requested than stored, return as many bytes as possible. + /// If too few bytes exist at that offset, return the maximum number + /// available. This includes cases of offset being beyond the stored array. 
pub fn get_large_blob_array( &self, mut byte_count: usize, @@ -481,24 +482,24 @@ impl PersistentStore { ) -> Result, Ctap2StatusCode> { if self.store.find(key::LARGE_BLOB_SHARDS.start)?.is_none() { return Ok(vec![ - 0x80, 0x76, 0xbe, 0x8b, 0x52, 0x8d, 0x00, 0x75, 0xf7, 0xaa, 0xe9, 0x8d, 0x6f, 0xa5, - 0x7a, 0x6d, 0x3c, + 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5, + 0x7A, 0x6D, 0x3C, ]); } let mut output = Vec::with_capacity(byte_count); while byte_count > 0 { - let shard = offset / SHARD_SIZE; let shard_offset = offset % SHARD_SIZE; let shard_length = cmp::min(SHARD_SIZE - shard_offset, byte_count); - let shard_key = key::LARGE_BLOB_SHARDS.start + shard; + let shard_key = key::LARGE_BLOB_SHARDS.start + offset / SHARD_SIZE; if !key::LARGE_BLOB_SHARDS.contains(&shard_key) { // This request should have been caught at application level. return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } let shard_entry = self.store.find(shard_key)?.unwrap_or_default(); if shard_entry.len() < shard_offset + shard_length { - output.extend(&shard_entry[..]); + // If fewer bytes exist than requested, return them all. + output.extend(&shard_entry[shard_offset..]); return Ok(output); } output.extend(&shard_entry[shard_offset..shard_offset + shard_length]); @@ -529,7 +530,7 @@ impl PersistentStore { } // The length is not stored, so overwrite old entries explicitly. for key in shard_key..key::LARGE_BLOB_SHARDS.end { - // Assuming the store optimizes out unnecessary writes. + // Assuming the store optimizes out unnecessary removes. 
self.store.remove(key)?; } Ok(()) @@ -1223,12 +1224,18 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - let large_blob_array = vec![0xC0; 1]; + let large_blob_array = vec![0x01, 0x02, 0x03]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); let restored_large_blob_array = persistent_store.get_large_blob_array(1, 0).unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); + assert_eq!(vec![0x01], restored_large_blob_array); + let restored_large_blob_array = persistent_store.get_large_blob_array(1, 1).unwrap(); + assert_eq!(vec![0x02], restored_large_blob_array); + let restored_large_blob_array = persistent_store.get_large_blob_array(1, 2).unwrap(); + assert_eq!(vec![0x03], restored_large_blob_array); + let restored_large_blob_array = persistent_store.get_large_blob_array(2, 2).unwrap(); + assert_eq!(vec![0x03], restored_large_blob_array); let large_blob_array = vec![0xC0; SHARD_SIZE]; assert!(persistent_store diff --git a/src/ctap/storage/key.rs b/src/ctap/storage/key.rs index 4f6ba513..20936859 100644 --- a/src/ctap/storage/key.rs +++ b/src/ctap/storage/key.rs @@ -91,7 +91,7 @@ make_partition! { /// Storage for the serialized large blob array. /// /// The stored large blob can be too big for one key, so it has to be sharded. - LARGE_BLOB_SHARDS = 2000..2016; + LARGE_BLOB_SHARDS = 2000..2004; /// If this entry exists and equals 1, the PIN needs to be changed. 
FORCE_PIN_CHANGE = 2040; From cf8b54b39c1f0ab7b5232ba4af8572628b46fc4b Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Fri, 22 Jan 2021 14:16:34 +0100 Subject: [PATCH 3/9] large blob commit is one transaction --- src/ctap/storage.rs | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 9b6ce313..0b66f4f1 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -514,26 +514,25 @@ impl PersistentStore { &mut self, large_blob_array: &[u8], ) -> Result<(), Ctap2StatusCode> { - let mut large_blob_index = 0; - let mut shard_key = key::LARGE_BLOB_SHARDS.start; - while large_blob_index < large_blob_array.len() { - if !key::LARGE_BLOB_SHARDS.contains(&shard_key) { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); - } - let shard_length = cmp::min(SHARD_SIZE, large_blob_array.len() - large_blob_index); - self.store.insert( - shard_key, - &large_blob_array[large_blob_index..large_blob_index + shard_length], - )?; - large_blob_index += shard_length; - shard_key += 1; + if large_blob_array.len() > MAX_LARGE_BLOB_ARRAY_SIZE { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - // The length is not stored, so overwrite old entries explicitly. - for key in shard_key..key::LARGE_BLOB_SHARDS.end { - // Assuming the store optimizes out unnecessary removes. 
- self.store.remove(key)?; + const MIN_SHARD_KEY: usize = key::LARGE_BLOB_SHARDS.start; + const SHARD_COUNT: usize = key::LARGE_BLOB_SHARDS.end - MIN_SHARD_KEY; + let mut transactions = Vec::with_capacity(SHARD_COUNT); + for shard_key in MIN_SHARD_KEY..key::LARGE_BLOB_SHARDS.end { + let large_blob_index = (shard_key - MIN_SHARD_KEY) * SHARD_SIZE; + if large_blob_array.len() > large_blob_index { + let shard_length = cmp::min(SHARD_SIZE, large_blob_array.len() - large_blob_index); + transactions.push(StoreUpdate::Insert { + key: shard_key, + value: &large_blob_array[large_blob_index..large_blob_index + shard_length], + }); + } else { + transactions.push(StoreUpdate::Remove { key: shard_key }); + } } - Ok(()) + Ok(self.store.transaction(&transactions)?) } /// Returns the attestation private key if defined. From 7d04c5c6d0140115ea5914469f8b09365385ece6 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Fri, 22 Jan 2021 14:23:32 +0100 Subject: [PATCH 4/9] fixes const usage in test_get_info --- src/ctap/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ctap/mod.rs b/src/ctap/mod.rs index 95b5b0a1..8fa622ce 100644 --- a/src/ctap/mod.rs +++ b/src/ctap/mod.rs @@ -1267,7 +1267,7 @@ mod test { "setMinPINLength" => true, "forcePINChange" => false, }, - 0x05 => 1024, + 0x05 => MAX_MSG_SIZE as u64, 0x06 => cbor_array_vec![vec![1]], 0x07 => MAX_CREDENTIAL_COUNT_IN_LIST.map(|c| c as u64), 0x08 => CREDENTIAL_ID_SIZE as u64, From 19c089e955547d34ce5857c1661f5f62252e07e7 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Fri, 22 Jan 2021 18:54:45 +0100 Subject: [PATCH 5/9] improvements to large blob storage --- src/ctap/large_blobs.rs | 2 +- src/ctap/storage.rs | 188 +++++++++++++++++++++++++--------------- 2 files changed, 120 insertions(+), 70 deletions(-) diff --git a/src/ctap/large_blobs.rs b/src/ctap/large_blobs.rs index 6dc80ce6..ab38df09 100644 --- a/src/ctap/large_blobs.rs +++ b/src/ctap/large_blobs.rs @@ -69,7 +69,7 @@ impl LargeBlobs { 
if get > MAX_FRAGMENT_LENGTH { return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_LENGTH); } - let config = persistent_store.get_large_blob_array(get, offset)?; + let config = persistent_store.get_large_blob_array(offset, get)?; return Ok(ResponseData::AuthenticatorLargeBlobs(Some( AuthenticatorLargeBlobsResponse { config }, ))); diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 0b66f4f1..934d5337 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -60,9 +60,7 @@ const DEFAULT_MIN_PIN_LENGTH_RP_IDS: Vec = Vec::new(); // This constant is an attempt to limit storage requirements. If you don't set it to 0, // the stored strings can still be unbounded, but that is true for all RP IDs. pub const MAX_RP_IDS_LENGTH: usize = 8; -const SHARD_SIZE: usize = 1023; -pub const MAX_LARGE_BLOB_ARRAY_SIZE: usize = - SHARD_SIZE * (key::LARGE_BLOB_SHARDS.end - key::LARGE_BLOB_SHARDS.start); +pub const MAX_LARGE_BLOB_ARRAY_SIZE: usize = 2048; /// Wrapper for master keys. pub struct MasterKeys { @@ -471,38 +469,55 @@ impl PersistentStore { )?) } + /// The size used for shards of large blobs. + /// + /// This value is constant during the lifetime of the device. + fn shard_size(&self) -> usize { + self.store.max_value_length() + } + /// Reads the byte vector stored as the serialized large blobs array. /// /// If too few bytes exist at that offset, return the maximum number /// available. This includes cases of offset being beyond the stored array. + /// + /// If no large blob is committed to the store, get responds as if an empty + /// CBOR array (0x80) was written, together with the 16 byte prefix of its + /// SHA256, to a total length of 17 byte (which is the shortest legitemate + /// large blob entry possible). 
pub fn get_large_blob_array( &self, - mut byte_count: usize, mut offset: usize, + mut byte_count: usize, ) -> Result, Ctap2StatusCode> { - if self.store.find(key::LARGE_BLOB_SHARDS.start)?.is_none() { - return Ok(vec![ - 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5, - 0x7A, 0x6D, 0x3C, - ]); - } let mut output = Vec::with_capacity(byte_count); while byte_count > 0 { - let shard_offset = offset % SHARD_SIZE; - let shard_length = cmp::min(SHARD_SIZE - shard_offset, byte_count); - - let shard_key = key::LARGE_BLOB_SHARDS.start + offset / SHARD_SIZE; + let shard_key = key::LARGE_BLOB_SHARDS.start + offset / self.shard_size(); if !key::LARGE_BLOB_SHARDS.contains(&shard_key) { // This request should have been caught at application level. return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - let shard_entry = self.store.find(shard_key)?.unwrap_or_default(); - if shard_entry.len() < shard_offset + shard_length { - // If fewer bytes exist than requested, return them all. 
- output.extend(&shard_entry[shard_offset..]); - return Ok(output); + let shard_entry = self.store.find(shard_key)?; + let shard_entry = if shard_key == key::LARGE_BLOB_SHARDS.start { + shard_entry.unwrap_or_else(|| { + vec![ + 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, + 0x6F, 0xA5, 0x7A, 0x6D, 0x3C, + ] + }) + } else { + shard_entry.unwrap_or_default() + }; + + let shard_offset = offset % self.shard_size(); + if shard_entry.len() < shard_offset { + break; + } + let shard_length = cmp::min(shard_entry.len() - shard_offset, byte_count); + output.extend(&shard_entry[shard_offset..][..shard_length]); + if shard_entry.len() < self.shard_size() { + break; } - output.extend(&shard_entry[shard_offset..shard_offset + shard_length]); offset += shard_length; byte_count -= shard_length; } @@ -517,22 +532,18 @@ impl PersistentStore { if large_blob_array.len() > MAX_LARGE_BLOB_ARRAY_SIZE { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - const MIN_SHARD_KEY: usize = key::LARGE_BLOB_SHARDS.start; - const SHARD_COUNT: usize = key::LARGE_BLOB_SHARDS.end - MIN_SHARD_KEY; - let mut transactions = Vec::with_capacity(SHARD_COUNT); - for shard_key in MIN_SHARD_KEY..key::LARGE_BLOB_SHARDS.end { - let large_blob_index = (shard_key - MIN_SHARD_KEY) * SHARD_SIZE; - if large_blob_array.len() > large_blob_index { - let shard_length = cmp::min(SHARD_SIZE, large_blob_array.len() - large_blob_index); - transactions.push(StoreUpdate::Insert { - key: shard_key, - value: &large_blob_array[large_blob_index..large_blob_index + shard_length], - }); - } else { - transactions.push(StoreUpdate::Remove { key: shard_key }); - } + + let mut shards = large_blob_array.chunks(self.shard_size()); + let mut updates = Vec::with_capacity(shards.len()); + for key in key::LARGE_BLOB_SHARDS { + let update = match shards.next() { + Some(value) => StoreUpdate::Insert { key, value }, + None if self.store.find(key)?.is_some() => StoreUpdate::Remove { key }, + _ => break, + 
}; + updates.push(update); } - Ok(self.store.transaction(&transactions)?) + Ok(self.store.transaction(&updates)?) } /// Returns the attestation private key if defined. @@ -1213,9 +1224,19 @@ mod test { } #[test] - #[allow(clippy::assertions_on_constants)] fn test_max_large_blob_array_size() { - assert!(MAX_LARGE_BLOB_ARRAY_SIZE >= 1024); + let mut rng = ThreadRng256 {}; + let persistent_store = PersistentStore::new(&mut rng); + + #[allow(clippy::assertions_on_constants)] + { + assert!(MAX_LARGE_BLOB_ARRAY_SIZE >= 1024); + } + assert!( + MAX_LARGE_BLOB_ARRAY_SIZE + <= persistent_store.shard_size() + * (key::LARGE_BLOB_SHARDS.end - key::LARGE_BLOB_SHARDS.start) + ); } #[test] @@ -1227,25 +1248,29 @@ mod test { assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); - let restored_large_blob_array = persistent_store.get_large_blob_array(1, 0).unwrap(); + let restored_large_blob_array = persistent_store.get_large_blob_array(0, 1).unwrap(); assert_eq!(vec![0x01], restored_large_blob_array); let restored_large_blob_array = persistent_store.get_large_blob_array(1, 1).unwrap(); assert_eq!(vec![0x02], restored_large_blob_array); - let restored_large_blob_array = persistent_store.get_large_blob_array(1, 2).unwrap(); + let restored_large_blob_array = persistent_store.get_large_blob_array(2, 1).unwrap(); assert_eq!(vec![0x03], restored_large_blob_array); let restored_large_blob_array = persistent_store.get_large_blob_array(2, 2).unwrap(); assert_eq!(vec![0x03], restored_large_blob_array); + let restored_large_blob_array = persistent_store.get_large_blob_array(3, 1).unwrap(); + assert_eq!(Vec::::new(), restored_large_blob_array); + let restored_large_blob_array = persistent_store.get_large_blob_array(4, 1).unwrap(); + assert_eq!(Vec::::new(), restored_large_blob_array); - let large_blob_array = vec![0xC0; SHARD_SIZE]; + let large_blob_array = vec![0xC0; persistent_store.shard_size()]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) 
.is_ok()); let restored_large_blob_array = persistent_store - .get_large_blob_array(SHARD_SIZE, 0) + .get_large_blob_array(0, persistent_store.shard_size()) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); let restored_large_blob_array = persistent_store - .get_large_blob_array(SHARD_SIZE + 1, 0) + .get_large_blob_array(0, persistent_store.shard_size() + 1) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); } @@ -1255,32 +1280,32 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - let large_blob_array = vec![0xC0; SHARD_SIZE + 1]; + let large_blob_array = vec![0xC0; persistent_store.shard_size() + 1]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); let restored_large_blob_array = persistent_store - .get_large_blob_array(SHARD_SIZE, 0) + .get_large_blob_array(0, persistent_store.shard_size()) .unwrap(); assert_eq!( - large_blob_array[..SHARD_SIZE], + large_blob_array[..persistent_store.shard_size()], restored_large_blob_array[..] 
); let restored_large_blob_array = persistent_store - .get_large_blob_array(SHARD_SIZE + 1, 0) + .get_large_blob_array(0, persistent_store.shard_size() + 1) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); - let large_blob_array = vec![0xC0; 2 * SHARD_SIZE]; + let large_blob_array = vec![0xC0; 2 * persistent_store.shard_size()]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); let restored_large_blob_array = persistent_store - .get_large_blob_array(2 * SHARD_SIZE, 0) + .get_large_blob_array(0, 2 * persistent_store.shard_size()) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); let restored_large_blob_array = persistent_store - .get_large_blob_array(2 * SHARD_SIZE + 1, 0) + .get_large_blob_array(0, 2 * persistent_store.shard_size() + 1) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); } @@ -1290,37 +1315,46 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - let mut large_blob_array = vec![0x11; SHARD_SIZE]; - large_blob_array.extend([0x22; SHARD_SIZE].iter()); - large_blob_array.extend([0x33; 1].iter()); + let mut large_blob_array = vec![0x11; persistent_store.shard_size()]; + large_blob_array.extend(vec![0x22; persistent_store.shard_size()]); + large_blob_array.extend(&[0x33; 1]); assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); let restored_large_blob_array = persistent_store - .get_large_blob_array(2 * SHARD_SIZE + 1, 0) + .get_large_blob_array(0, 2 * persistent_store.shard_size() + 1) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); let restored_large_blob_array = persistent_store - .get_large_blob_array(3 * SHARD_SIZE, 0) + .get_large_blob_array(0, 3 * persistent_store.shard_size()) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); let shard1 = persistent_store - .get_large_blob_array(SHARD_SIZE, 0) + .get_large_blob_array(0, 
persistent_store.shard_size()) .unwrap(); let shard2 = persistent_store - .get_large_blob_array(SHARD_SIZE, SHARD_SIZE) + .get_large_blob_array(persistent_store.shard_size(), persistent_store.shard_size()) .unwrap(); let shard3 = persistent_store - .get_large_blob_array(1, 2 * SHARD_SIZE) + .get_large_blob_array(2 * persistent_store.shard_size(), 1) .unwrap(); - assert_eq!(large_blob_array[..SHARD_SIZE], shard1[..]); - assert_eq!(large_blob_array[SHARD_SIZE..2 * SHARD_SIZE], shard2[..]); - assert_eq!(large_blob_array[2 * SHARD_SIZE..], shard3[..]); + assert_eq!( + large_blob_array[..persistent_store.shard_size()], + shard1[..] + ); + assert_eq!( + large_blob_array[persistent_store.shard_size()..2 * persistent_store.shard_size()], + shard2[..] + ); + assert_eq!( + large_blob_array[2 * persistent_store.shard_size()..], + shard3[..] + ); let shard12 = persistent_store - .get_large_blob_array(2, SHARD_SIZE - 1) + .get_large_blob_array(persistent_store.shard_size() - 1, 2) .unwrap(); let shard23 = persistent_store - .get_large_blob_array(2, 2 * SHARD_SIZE - 1) + .get_large_blob_array(2 * persistent_store.shard_size() - 1, 2) .unwrap(); assert_eq!(vec![0x11, 0x22], shard12); assert_eq!(vec![0x22, 0x33], shard23); @@ -1331,32 +1365,48 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - let large_blob_array = vec![0x11; SHARD_SIZE + 1]; + let large_blob_array = vec![0x11; persistent_store.shard_size() + 1]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); - let large_blob_array = vec![0x22; SHARD_SIZE]; + let large_blob_array = vec![0x22; persistent_store.shard_size()]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); let restored_large_blob_array = persistent_store - .get_large_blob_array(SHARD_SIZE + 1, 0) + .get_large_blob_array(0, persistent_store.shard_size() + 1) .unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); let 
restored_large_blob_array = persistent_store - .get_large_blob_array(1, SHARD_SIZE) + .get_large_blob_array(persistent_store.shard_size(), 1) .unwrap(); assert_eq!(Vec::::new(), restored_large_blob_array); assert!(persistent_store.commit_large_blob_array(&[]).is_ok()); let restored_large_blob_array = persistent_store - .get_large_blob_array(SHARD_SIZE + 1, 0) + .get_large_blob_array(0, persistent_store.shard_size() + 1) .unwrap(); + // Committing an empty array resets to the default blob of 17 byte. + assert_eq!(restored_large_blob_array.len(), 17); + } + + #[test] + fn test_commit_get_large_blob_array_no_commit() { + let mut rng = ThreadRng256 {}; + let persistent_store = PersistentStore::new(&mut rng); + let empty_blob_array = vec![ - 0x80, 0x76, 0xbe, 0x8b, 0x52, 0x8d, 0x00, 0x75, 0xf7, 0xaa, 0xe9, 0x8d, 0x6f, 0xa5, - 0x7a, 0x6d, 0x3c, + 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5, + 0x7A, 0x6D, 0x3C, ]; + let restored_large_blob_array = persistent_store + .get_large_blob_array(0, persistent_store.shard_size()) + .unwrap(); assert_eq!(empty_blob_array, restored_large_blob_array); + let restored_large_blob_array = persistent_store.get_large_blob_array(0, 1).unwrap(); + assert_eq!(vec![0x80], restored_large_blob_array); + let restored_large_blob_array = persistent_store.get_large_blob_array(16, 1).unwrap(); + assert_eq!(vec![0x3C], restored_large_blob_array); } #[test] From 563f35184ac8e97849b677f4d4f026e849329aa3 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Mon, 25 Jan 2021 17:50:01 +0100 Subject: [PATCH 6/9] use new store fragments --- src/ctap/storage.rs | 79 ++++++++++++++------------------------------- 1 file changed, 25 insertions(+), 54 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 934d5337..dab66207 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -32,6 +32,7 @@ use core::cmp; use core::convert::TryInto; use crypto::rng256::Rng256; use persistent_store::StoreUpdate; 
+use persistent_store::fragment::{read_range, write}; // Those constants may be modified before compilation to tune the behavior of the key. // @@ -469,13 +470,6 @@ impl PersistentStore { )?) } - /// The size used for shards of large blobs. - /// - /// This value is constant during the lifetime of the device. - fn shard_size(&self) -> usize { - self.store.max_value_length() - } - /// Reads the byte vector stored as the serialized large blobs array. /// /// If too few bytes exist at that offset, return the maximum number @@ -483,45 +477,23 @@ impl PersistentStore { /// /// If no large blob is committed to the store, get responds as if an empty /// CBOR array (0x80) was written, together with the 16 byte prefix of its - /// SHA256, to a total length of 17 byte (which is the shortest legitemate + /// SHA256, to a total length of 17 byte (which is the shortest legitimate /// large blob entry possible). pub fn get_large_blob_array( &self, - mut offset: usize, - mut byte_count: usize, + offset: usize, + byte_count: usize, ) -> Result, Ctap2StatusCode> { - let mut output = Vec::with_capacity(byte_count); - while byte_count > 0 { - let shard_key = key::LARGE_BLOB_SHARDS.start + offset / self.shard_size(); - if !key::LARGE_BLOB_SHARDS.contains(&shard_key) { - // This request should have been caught at application level. 
- return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); - } - let shard_entry = self.store.find(shard_key)?; - let shard_entry = if shard_key == key::LARGE_BLOB_SHARDS.start { - shard_entry.unwrap_or_else(|| { - vec![ - 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, - 0x6F, 0xA5, 0x7A, 0x6D, 0x3C, - ] - }) - } else { - shard_entry.unwrap_or_default() - }; - - let shard_offset = offset % self.shard_size(); - if shard_entry.len() < shard_offset { - break; - } - let shard_length = cmp::min(shard_entry.len() - shard_offset, byte_count); - output.extend(&shard_entry[shard_offset..][..shard_length]); - if shard_entry.len() < self.shard_size() { - break; - } - offset += shard_length; - byte_count -= shard_length; - } - Ok(output) + let byte_range = offset..offset + byte_count; + let output = read_range(&self.store, &key::LARGE_BLOB_SHARDS, byte_range)?; + Ok(output.unwrap_or_else(|| { + let empty_large_blob = vec![ + 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5, + 0x7A, 0x6D, 0x3C, + ]; + let last_index = cmp::min(empty_large_blob.len(), offset + byte_count); + empty_large_blob.get(offset..last_index).unwrap_or_default().to_vec() + })) } /// Sets a byte vector as the serialized large blobs array. @@ -529,21 +501,11 @@ impl PersistentStore { &mut self, large_blob_array: &[u8], ) -> Result<(), Ctap2StatusCode> { + // This input should have been caught at caller level. if large_blob_array.len() > MAX_LARGE_BLOB_ARRAY_SIZE { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - - let mut shards = large_blob_array.chunks(self.shard_size()); - let mut updates = Vec::with_capacity(shards.len()); - for key in key::LARGE_BLOB_SHARDS { - let update = match shards.next() { - Some(value) => StoreUpdate::Insert { key, value }, - None if self.store.find(key)?.is_some() => StoreUpdate::Remove { key }, - _ => break, - }; - updates.push(update); - } - Ok(self.store.transaction(&updates)?) 
+ Ok(write(&mut self.store, &key::LARGE_BLOB_SHARDS, large_blob_array)?) } /// Returns the attestation private key if defined. @@ -642,6 +604,15 @@ impl PersistentStore { pub fn force_pin_change(&mut self) -> Result<(), Ctap2StatusCode> { Ok(self.store.insert(key::FORCE_PIN_CHANGE, &[])?) } + + /// The size used for shards of large blobs. + /// + /// This value is constant during the lifetime of the device. + #[cfg(test)] + fn shard_size(&self) -> usize { + self.store.max_value_length() + } + } impl From for Ctap2StatusCode { From 4f3c773b15ccc6f7f04bc871b0e760dbb016aab2 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Mon, 25 Jan 2021 18:08:48 +0100 Subject: [PATCH 7/9] formats code, clippy --- libraries/persistent_store/src/fragment.rs | 1 + src/ctap/storage.rs | 14 ++++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/libraries/persistent_store/src/fragment.rs b/libraries/persistent_store/src/fragment.rs index 73b6d291..851e3d28 100644 --- a/libraries/persistent_store/src/fragment.rs +++ b/libraries/persistent_store/src/fragment.rs @@ -24,6 +24,7 @@ use alloc::vec::Vec; use core::ops::Range; /// Represents a sequence of keys. +#[allow(clippy::len_without_is_empty)] pub trait Keys { /// Returns the number of keys. fn len(&self) -> usize; diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index dab66207..04080416 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -31,8 +31,8 @@ use cbor::cbor_array_vec; use core::cmp; use core::convert::TryInto; use crypto::rng256::Rng256; -use persistent_store::StoreUpdate; use persistent_store::fragment::{read_range, write}; +use persistent_store::StoreUpdate; // Those constants may be modified before compilation to tune the behavior of the key. 
// @@ -492,7 +492,10 @@ impl PersistentStore { 0x7A, 0x6D, 0x3C, ]; let last_index = cmp::min(empty_large_blob.len(), offset + byte_count); - empty_large_blob.get(offset..last_index).unwrap_or_default().to_vec() + empty_large_blob + .get(offset..last_index) + .unwrap_or_default() + .to_vec() })) } @@ -505,7 +508,11 @@ impl PersistentStore { if large_blob_array.len() > MAX_LARGE_BLOB_ARRAY_SIZE { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - Ok(write(&mut self.store, &key::LARGE_BLOB_SHARDS, large_blob_array)?) + Ok(write( + &mut self.store, + &key::LARGE_BLOB_SHARDS, + large_blob_array, + )?) } /// Returns the attestation private key if defined. @@ -612,7 +619,6 @@ impl PersistentStore { fn shard_size(&self) -> usize { self.store.max_value_length() } - } impl From for Ctap2StatusCode { From 2af85ad9d0eebe8ef11d837b935f9d7739e1f9ea Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Mon, 25 Jan 2021 18:29:38 +0100 Subject: [PATCH 8/9] style fix --- src/ctap/storage.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 7fdf50c0..6f754617 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -31,8 +31,7 @@ use cbor::cbor_array_vec; use core::cmp; use core::convert::TryInto; use crypto::rng256::Rng256; -use persistent_store::fragment::{read_range, write}; -use persistent_store::StoreUpdate; +use persistent_store::{fragment, StoreUpdate}; // Those constants may be modified before compilation to tune the behavior of the key. 
// @@ -485,14 +484,14 @@ impl PersistentStore { byte_count: usize, ) -> Result, Ctap2StatusCode> { let byte_range = offset..offset + byte_count; - let output = read_range(&self.store, &key::LARGE_BLOB_SHARDS, byte_range)?; + let output = fragment::read_range(&self.store, &key::LARGE_BLOB_SHARDS, byte_range)?; Ok(output.unwrap_or_else(|| { - let empty_large_blob = vec![ + const EMPTY_LARGE_BLOB: [u8; 17] = [ 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5, 0x7A, 0x6D, 0x3C, ]; - let last_index = cmp::min(empty_large_blob.len(), offset + byte_count); - empty_large_blob + let last_index = cmp::min(EMPTY_LARGE_BLOB.len(), offset + byte_count); + EMPTY_LARGE_BLOB .get(offset..last_index) .unwrap_or_default() .to_vec() @@ -508,7 +507,7 @@ impl PersistentStore { if large_blob_array.len() > MAX_LARGE_BLOB_ARRAY_SIZE { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - Ok(write( + Ok(fragment::write( &mut self.store, &key::LARGE_BLOB_SHARDS, large_blob_array, From 769a2ae1c543a731a3daae1e386de72d842ff019 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Mon, 25 Jan 2021 18:43:51 +0100 Subject: [PATCH 9/9] reduce testing to not account for shard size --- src/ctap/storage.rs | 130 +++----------------------------------------- 1 file changed, 8 insertions(+), 122 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 6f754617..43a00c7c 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -610,14 +610,6 @@ impl PersistentStore { pub fn force_pin_change(&mut self) -> Result<(), Ctap2StatusCode> { Ok(self.store.insert(key::FORCE_PIN_CHANGE, &[])?) } - - /// The size used for shards of large blobs. - /// - /// This value is constant during the lifetime of the device. 
- #[cfg(test)] - fn shard_size(&self) -> usize { - self.store.max_value_length() - } } impl From for Ctap2StatusCode { @@ -1210,13 +1202,13 @@ mod test { } assert!( MAX_LARGE_BLOB_ARRAY_SIZE - <= persistent_store.shard_size() + <= persistent_store.store.max_value_length() * (key::LARGE_BLOB_SHARDS.end - key::LARGE_BLOB_SHARDS.start) ); } #[test] - fn test_commit_get_large_blob_array_1_shard() { + fn test_commit_get_large_blob_array() { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); @@ -1236,104 +1228,6 @@ mod test { assert_eq!(Vec::::new(), restored_large_blob_array); let restored_large_blob_array = persistent_store.get_large_blob_array(4, 1).unwrap(); assert_eq!(Vec::::new(), restored_large_blob_array); - - let large_blob_array = vec![0xC0; persistent_store.shard_size()]; - assert!(persistent_store - .commit_large_blob_array(&large_blob_array) - .is_ok()); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, persistent_store.shard_size()) - .unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, persistent_store.shard_size() + 1) - .unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); - } - - #[test] - fn test_commit_get_large_blob_array_2_shards() { - let mut rng = ThreadRng256 {}; - let mut persistent_store = PersistentStore::new(&mut rng); - - let large_blob_array = vec![0xC0; persistent_store.shard_size() + 1]; - assert!(persistent_store - .commit_large_blob_array(&large_blob_array) - .is_ok()); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, persistent_store.shard_size()) - .unwrap(); - assert_eq!( - large_blob_array[..persistent_store.shard_size()], - restored_large_blob_array[..] 
- ); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, persistent_store.shard_size() + 1) - .unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); - - let large_blob_array = vec![0xC0; 2 * persistent_store.shard_size()]; - assert!(persistent_store - .commit_large_blob_array(&large_blob_array) - .is_ok()); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, 2 * persistent_store.shard_size()) - .unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, 2 * persistent_store.shard_size() + 1) - .unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); - } - - #[test] - fn test_commit_get_large_blob_array_3_shards() { - let mut rng = ThreadRng256 {}; - let mut persistent_store = PersistentStore::new(&mut rng); - - let mut large_blob_array = vec![0x11; persistent_store.shard_size()]; - large_blob_array.extend(vec![0x22; persistent_store.shard_size()]); - large_blob_array.extend(&[0x33; 1]); - assert!(persistent_store - .commit_large_blob_array(&large_blob_array) - .is_ok()); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, 2 * persistent_store.shard_size() + 1) - .unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, 3 * persistent_store.shard_size()) - .unwrap(); - assert_eq!(large_blob_array, restored_large_blob_array); - let shard1 = persistent_store - .get_large_blob_array(0, persistent_store.shard_size()) - .unwrap(); - let shard2 = persistent_store - .get_large_blob_array(persistent_store.shard_size(), persistent_store.shard_size()) - .unwrap(); - let shard3 = persistent_store - .get_large_blob_array(2 * persistent_store.shard_size(), 1) - .unwrap(); - assert_eq!( - large_blob_array[..persistent_store.shard_size()], - shard1[..] 
- ); - assert_eq!( - large_blob_array[persistent_store.shard_size()..2 * persistent_store.shard_size()], - shard2[..] - ); - assert_eq!( - large_blob_array[2 * persistent_store.shard_size()..], - shard3[..] - ); - let shard12 = persistent_store - .get_large_blob_array(persistent_store.shard_size() - 1, 2) - .unwrap(); - let shard23 = persistent_store - .get_large_blob_array(2 * persistent_store.shard_size() - 1, 2) - .unwrap(); - assert_eq!(vec![0x11, 0x22], shard12); - assert_eq!(vec![0x22, 0x33], shard23); } #[test] @@ -1341,27 +1235,21 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - let large_blob_array = vec![0x11; persistent_store.shard_size() + 1]; + let large_blob_array = vec![0x11; 5]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); - let large_blob_array = vec![0x22; persistent_store.shard_size()]; + let large_blob_array = vec![0x22; 4]; assert!(persistent_store .commit_large_blob_array(&large_blob_array) .is_ok()); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, persistent_store.shard_size() + 1) - .unwrap(); + let restored_large_blob_array = persistent_store.get_large_blob_array(0, 5).unwrap(); assert_eq!(large_blob_array, restored_large_blob_array); - let restored_large_blob_array = persistent_store - .get_large_blob_array(persistent_store.shard_size(), 1) - .unwrap(); + let restored_large_blob_array = persistent_store.get_large_blob_array(4, 1).unwrap(); assert_eq!(Vec::::new(), restored_large_blob_array); assert!(persistent_store.commit_large_blob_array(&[]).is_ok()); - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, persistent_store.shard_size() + 1) - .unwrap(); + let restored_large_blob_array = persistent_store.get_large_blob_array(0, 20).unwrap(); // Committing an empty array resets to the default blob of 17 byte. 
assert_eq!(restored_large_blob_array.len(), 17); } @@ -1375,9 +1263,7 @@ mod test { 0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5, 0x7A, 0x6D, 0x3C, ]; - let restored_large_blob_array = persistent_store - .get_large_blob_array(0, persistent_store.shard_size()) - .unwrap(); + let restored_large_blob_array = persistent_store.get_large_blob_array(0, 17).unwrap(); assert_eq!(empty_blob_array, restored_large_blob_array); let restored_large_blob_array = persistent_store.get_large_blob_array(0, 1).unwrap(); assert_eq!(vec![0x80], restored_large_blob_array);