diff --git a/src/dbus_api/consts.rs b/src/dbus_api/consts.rs
index 16ea2377d5..333470b12c 100644
--- a/src/dbus_api/consts.rs
+++ b/src/dbus_api/consts.rs
@@ -67,6 +67,7 @@ pub const FILESYSTEM_CREATED_PROP: &str = "Created";
 pub const FILESYSTEM_SIZE_PROP: &str = "Size";
 pub const FILESYSTEM_SIZE_LIMIT_PROP: &str = "SizeLimit";
 pub const FILESYSTEM_ORIGIN_PROP: &str = "Origin";
+pub const FILESYSTEM_MERGE_SCHEDULED_PROP: &str = "MergeScheduled";
 
 pub const BLOCKDEV_INTERFACE_NAME_3_0: &str = "org.storage.stratis3.blockdev.r0";
 pub const BLOCKDEV_INTERFACE_NAME_3_1: &str = "org.storage.stratis3.blockdev.r1";
diff --git a/src/dbus_api/filesystem/filesystem_3_7/api.rs b/src/dbus_api/filesystem/filesystem_3_7/api.rs
index b8864240e9..54f7cde132 100644
--- a/src/dbus_api/filesystem/filesystem_3_7/api.rs
+++ b/src/dbus_api/filesystem/filesystem_3_7/api.rs
@@ -4,7 +4,13 @@
 
 use dbus_tree::{Access, EmitsChangedSignal, Factory, MTSync, Property};
 
-use crate::dbus_api::{consts, filesystem::filesystem_3_7::props::get_fs_origin, types::TData};
+use crate::dbus_api::{
+    consts,
+    filesystem::filesystem_3_7::props::{
+        get_fs_merge_scheduled, get_fs_origin, set_fs_merge_scheduled,
+    },
+    types::TData,
+};
 
 pub fn origin_property(f: &Factory<MTSync<TData>, TData>) -> Property<MTSync<TData>, TData> {
     f.property::<(bool, String), _>(consts::FILESYSTEM_ORIGIN_PROP, ())
@@ -12,3 +18,13 @@ pub fn origin_property(f: &Factory<MTSync<TData>, TData>) -> Property<MTSync<TData>, TData> {
+
+pub fn merge_scheduled_property(
+    f: &Factory<MTSync<TData>, TData>,
+) -> Property<MTSync<TData>, TData> {
+    f.property::<bool, _>(consts::FILESYSTEM_MERGE_SCHEDULED_PROP, ())
+        .access(Access::ReadWrite)
+        .emits_changed(EmitsChangedSignal::True)
+        .on_get(get_fs_merge_scheduled)
+        .on_set(set_fs_merge_scheduled)
+}
diff --git a/src/dbus_api/filesystem/filesystem_3_7/mod.rs b/src/dbus_api/filesystem/filesystem_3_7/mod.rs
index 4028a40d67..ca79e7c6c1 100644
--- a/src/dbus_api/filesystem/filesystem_3_7/mod.rs
+++ b/src/dbus_api/filesystem/filesystem_3_7/mod.rs
@@ -5,4 +5,4 @@
 mod api;
 mod props;
 
-pub use api::origin_property;
+pub use api::{merge_scheduled_property, origin_property};
diff --git a/src/dbus_api/filesystem/filesystem_3_7/props.rs b/src/dbus_api/filesystem/filesystem_3_7/props.rs
index 5fa35ee585..ec27d1cae9 100644
--- a/src/dbus_api/filesystem/filesystem_3_7/props.rs
+++ b/src/dbus_api/filesystem/filesystem_3_7/props.rs
@@ -2,12 +2,16 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at http://mozilla.org/MPL/2.0/.
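As context for the handlers below: a minimal client-side sketch, not part of this patch, of driving the new read-write property with the Rust dbus crate. The filesystem object path is hypothetical, and the r7 interface name is inferred from the r0/r1 naming pattern shown in consts.rs above.

use std::time::Duration;

use dbus::blocking::{stdintf::org_freedesktop_dbus::Properties, Connection};

fn schedule_revert() -> Result<(), dbus::Error> {
    let conn = Connection::new_system()?;
    // Hypothetical object path; real paths are discovered via ObjectManager.
    let proxy = conn.with_proxy(
        "org.storage.stratis3",
        "/org/storage/stratis3/7130/fs/2",
        Duration::from_millis(5000),
    );
    let iface = "org.storage.stratis3.filesystem.r7";
    // Reading dispatches to on_get (get_fs_merge_scheduled); writing goes
    // through on_set (set_fs_merge_scheduled) and, on an actual value change,
    // a PropertiesChanged signal is emitted.
    let scheduled: bool = proxy.get(iface, "MergeScheduled")?;
    if !scheduled {
        proxy.set(iface, "MergeScheduled", true)?;
    }
    Ok(())
}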
-use dbus::arg::IterAppend; +use dbus::arg::{Iter, IterAppend}; use dbus_tree::{MTSync, MethodErr, PropInfo}; -use crate::dbus_api::{ - filesystem::shared::{self, get_filesystem_property}, - types::TData, +use crate::{ + dbus_api::{ + consts, + filesystem::shared::{self, get_filesystem_property}, + types::TData, + }, + engine::PropChangeAction, }; pub fn get_fs_origin( @@ -16,3 +20,37 @@ pub fn get_fs_origin( ) -> Result<(), MethodErr> { get_filesystem_property(i, p, |(_, _, f)| Ok(shared::fs_origin_prop(f))) } + +pub fn get_fs_merge_scheduled( + i: &mut IterAppend<'_>, + p: &PropInfo<'_, MTSync, TData>, +) -> Result<(), MethodErr> { + get_filesystem_property(i, p, |(_, _, f)| Ok(shared::fs_merge_scheduled_prop(f))) +} + +/// Set the merge scheduled property on a filesystem +pub fn set_fs_merge_scheduled( + i: &mut Iter<'_>, + p: &PropInfo<'_, MTSync, TData>, +) -> Result<(), MethodErr> { + let merge_scheduled: bool = i + .get() + .ok_or_else(|| MethodErr::failed("Value required as argument to set property"))?; + + let res = shared::set_fs_property_to_display( + p, + consts::FILESYSTEM_MERGE_SCHEDULED_PROP, + |(_, uuid, p)| shared::set_fs_merge_scheduled_prop(uuid, p, merge_scheduled), + ); + + match res { + Ok(PropChangeAction::NewValue(v)) => { + p.tree + .get_data() + .push_fs_merge_scheduled_change(p.path.get_name(), v); + Ok(()) + } + Ok(PropChangeAction::Identity) => Ok(()), + Err(e) => Err(e), + } +} diff --git a/src/dbus_api/filesystem/mod.rs b/src/dbus_api/filesystem/mod.rs index 095a436cea..bbf0c7e109 100644 --- a/src/dbus_api/filesystem/mod.rs +++ b/src/dbus_api/filesystem/mod.rs @@ -126,7 +126,8 @@ pub fn create_dbus_filesystem<'a>( .add_p(filesystem_3_0::size_property(&f)) .add_p(filesystem_3_0::used_property(&f)) .add_p(filesystem_3_6::size_limit_property(&f)) - .add_p(filesystem_3_7::origin_property(&f)), + .add_p(filesystem_3_7::origin_property(&f)) + .add_p(filesystem_3_7::merge_scheduled_property(&f)), ); let path = object_path.get_name().to_owned(); @@ -217,7 +218,8 @@ pub fn get_fs_properties( consts::FILESYSTEM_SIZE_PROP => shared::fs_size_prop(fs), consts::FILESYSTEM_USED_PROP => shared::fs_used_prop(fs), consts::FILESYSTEM_SIZE_LIMIT_PROP => shared::fs_size_limit_prop(fs), - consts::FILESYSTEM_ORIGIN_PROP => shared::fs_origin_prop(fs) + consts::FILESYSTEM_ORIGIN_PROP => shared::fs_origin_prop(fs), + consts::FILESYSTEM_MERGE_SCHEDULED_PROP => shared::fs_merge_scheduled_prop(fs) } } } diff --git a/src/dbus_api/filesystem/shared.rs b/src/dbus_api/filesystem/shared.rs index 8efd2d72df..2fbe3d6388 100644 --- a/src/dbus_api/filesystem/shared.rs +++ b/src/dbus_api/filesystem/shared.rs @@ -207,3 +207,18 @@ pub fn fs_used_prop(fs: &dyn Filesystem) -> (bool, String) { pub fn fs_origin_prop(fs: &dyn Filesystem) -> (bool, String) { prop_conv::fs_origin_to_prop(fs.origin()) } + +/// Generate D-Bus representation of merge scheduled property +pub fn fs_merge_scheduled_prop(fs: &dyn Filesystem) -> bool { + fs.merge_scheduled() +} + +#[inline] +pub fn set_fs_merge_scheduled_prop( + uuid: FilesystemUuid, + pool: &mut dyn Pool, + schedule: bool, +) -> Result, String> { + pool.set_fs_merge_scheduled(uuid, schedule) + .map_err(|e| e.to_string()) +} diff --git a/src/dbus_api/pool/pool_3_7/methods.rs b/src/dbus_api/pool/pool_3_7/methods.rs index 8f61dec9c3..37fb97381c 100644 --- a/src/dbus_api/pool/pool_3_7/methods.rs +++ b/src/dbus_api/pool/pool_3_7/methods.rs @@ -74,16 +74,16 @@ pub fn destroy_filesystems(m: &MethodInfo<'_, MTSync, TData>) -> MethodRe dbus_context.push_remove(op, 
filesystem_interface_list()); } - for sn_op in m.tree.iter().filter(|op| { - op.get_data() - .as_ref() - .map(|data| match data.uuid { - StratisUuid::Fs(uuid) => updated_uuids.contains(&uuid), - _ => false, - }) - .unwrap_or(false) + for (sn_op, origin) in m.tree.iter().filter_map(|op| { + op.get_data().as_ref().and_then(|data| match data.uuid { + StratisUuid::Fs(uuid) => updated_uuids + .iter() + .find(|(u, _)| *u == uuid) + .map(|(_, origin)| (op, *origin)), + _ => None, + }) }) { - dbus_context.push_filesystem_origin_change(sn_op.get_name()); + dbus_context.push_filesystem_origin_change(sn_op.get_name(), origin); } changed_uuids diff --git a/src/dbus_api/tree.rs b/src/dbus_api/tree.rs index 1fadb300a6..543f4801d3 100644 --- a/src/dbus_api/tree.rs +++ b/src/dbus_api/tree.rs @@ -203,8 +203,7 @@ impl DbusTreeHandler { } } - fn handle_fs_origin_change(&self, item: Path<'static>) { - let origin_prop = fs_origin_to_prop(None); + fn handle_fs_origin_change(&self, item: Path<'static>, new_origin: Option) { if self .property_changed_invalidated_signal( &item, @@ -212,7 +211,7 @@ impl DbusTreeHandler { consts::FILESYSTEM_INTERFACE_NAME_3_7 => { vec![], consts::FILESYSTEM_ORIGIN_PROP.to_string() => - box_variant!(origin_prop.clone()) + box_variant!(fs_origin_to_prop(new_origin)) } }, ) @@ -873,6 +872,26 @@ impl DbusTreeHandler { } } + /// Send a signal indicating that the filesystem merge scheduled value has + /// changed. + fn handle_fs_merge_scheduled_change(&self, path: Path<'static>, new_scheduled: bool) { + if let Err(e) = self.property_changed_invalidated_signal( + &path, + prop_hashmap!( + consts::FILESYSTEM_INTERFACE_NAME_3_7 => { + Vec::new(), + consts::FILESYSTEM_MERGE_SCHEDULED_PROP.to_string() => + box_variant!(new_scheduled) + } + ), + ) { + warn!( + "Failed to send a signal over D-Bus indicating filesystem merge scheduled value change: {}", + e + ); + } + } + /// Send a signal indicating that the blockdev user info has changed. fn handle_blockdev_user_info_change(&self, path: Path<'static>, new_user_info: Option) { let user_info_prop = blockdev_user_info_to_prop(new_user_info); @@ -1216,8 +1235,8 @@ impl DbusTreeHandler { self.handle_fs_name_change(item, new_name); Ok(true) } - DbusAction::FsOriginChange(item) => { - self.handle_fs_origin_change(item); + DbusAction::FsOriginChange(item, new_origin) => { + self.handle_fs_origin_change(item, new_origin); Ok(true) } DbusAction::PoolNameChange(item, new_name) => { @@ -1254,6 +1273,10 @@ impl DbusTreeHandler { self.handle_fs_size_limit_change(path, new_limit); Ok(true) } + DbusAction::FsMergeScheduledChange(path, new_scheduled) => { + self.handle_fs_merge_scheduled_change(path, new_scheduled); + Ok(true) + } DbusAction::PoolOverprovModeChange(path, new_mode) => { self.handle_pool_overprov_mode_change(path, new_mode); Ok(true) diff --git a/src/dbus_api/types.rs b/src/dbus_api/types.rs index af345c152c..0713c52e01 100644 --- a/src/dbus_api/types.rs +++ b/src/dbus_api/types.rs @@ -106,8 +106,9 @@ pub enum DbusAction { StoppedPoolsChange(StoppedPoolsInfo), BlockdevUserInfoChange(Path<'static>, Option), BlockdevTotalPhysicalSizeChange(Path<'static>, Sectors), - FsOriginChange(Path<'static>), + FsOriginChange(Path<'static>, Option), FsSizeLimitChange(Path<'static>, Option), + FsMergeScheduledChange(Path<'static>, bool), FsBackgroundChange( FilesystemUuid, SignalChange>, @@ -490,14 +491,34 @@ impl DbusContext { } /// Send changed signal for changed filesystem origin property. 
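The push_* helpers in this DbusContext hunk all follow one shape: wrap the change in a DbusAction and hand it to the signal-emitting thread over a channel, logging rather than failing when the channel is gone. A stripped-down, runnable sketch of that shape, with simplified stand-in names:

use std::sync::mpsc::Sender;

enum Action {
    MergeScheduledChange(String, bool), // (object path, new value)
}

struct Context {
    sender: Sender<Action>,
}

impl Context {
    fn push_merge_scheduled_change(&self, path: &str, value: bool) {
        // By the time this runs the engine state has already changed, so a
        // dead receiver only costs us the signal, never the operation.
        if let Err(e) = self
            .sender
            .send(Action::MergeScheduledChange(path.to_owned(), value))
        {
            eprintln!("no MergeScheduled signal will be sent for {path}: {e}");
        }
    }
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel();
    let ctx = Context { sender: tx };
    ctx.push_merge_scheduled_change("/org/storage/stratis3/7130/fs/2", true);
    assert!(matches!(rx.recv(), Ok(Action::MergeScheduledChange(_, true))));
}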
-    pub fn push_filesystem_origin_change(&self, path: &Path<'static>) {
-        if let Err(e) = self.sender.send(DbusAction::FsOriginChange(path.clone())) {
+    pub fn push_filesystem_origin_change(
+        &self,
+        path: &Path<'static>,
+        origin: Option<FilesystemUuid>,
+    ) {
+        if let Err(e) = self
+            .sender
+            .send(DbusAction::FsOriginChange(path.clone(), origin))
+        {
             warn!(
                 "Filesystem origin change event could not be sent to the processing thread; no signal will be sent out for the filesystem origin state change: {}",
                 e,
             )
         }
     }
+
+    /// Send changed signal for filesystem MergeScheduled property.
+    pub fn push_fs_merge_scheduled_change(&self, item: &Path<'static>, new_merge_scheduled: bool) {
+        if let Err(e) = self.sender.send(DbusAction::FsMergeScheduledChange(
+            item.clone(),
+            new_merge_scheduled,
+        )) {
+            warn!(
+                "D-Bus filesystem merge scheduled change event could not be sent to the processing thread; no signal will be sent out for the merge scheduled change of filesystem with path {}: {}",
+                item, e,
+            )
+        }
+    }
 }
 
 #[derive(Debug)]
diff --git a/src/engine/engine.rs b/src/engine/engine.rs
index 4f96493cf5..dc20fd76fa 100644
--- a/src/engine/engine.rs
+++ b/src/engine/engine.rs
@@ -96,6 +96,8 @@ pub trait Filesystem: Debug {
 
     /// Get filesystem snapshot origin.
     fn origin(&self) -> Option<FilesystemUuid>;
+
+    fn merge_scheduled(&self) -> bool;
 }
 
 pub trait BlockDev: Debug {
@@ -210,7 +212,7 @@ pub trait Pool: Debug + Send + Sync {
         &mut self,
         pool_name: &str,
         fs_uuids: &HashSet<FilesystemUuid>,
-    ) -> StratisResult<SetDeleteAction<FilesystemUuid, FilesystemUuid>>;
+    ) -> StratisResult<SetDeleteAction<FilesystemUuid, (FilesystemUuid, Option<FilesystemUuid>)>>;
 
     /// Rename filesystem
     /// Rename pool with uuid to new_name.
@@ -356,6 +358,13 @@ pub trait Pool: Debug + Send + Sync {
 
     /// Get the last written filesystem metadata.
     fn last_fs_metadata(&self, fs_name: Option<&str>) -> StratisResult;
+
+    /// Set whether a merge of the filesystem is scheduled.
+    fn set_fs_merge_scheduled(
+        &mut self,
+        fs: FilesystemUuid,
+        new_scheduled: bool,
+    ) -> StratisResult<PropChangeAction<bool>>;
 }
 
 pub type HandleEvents

= ( diff --git a/src/engine/sim_engine/filesystem.rs b/src/engine/sim_engine/filesystem.rs index b6661bab3a..7aacbf2894 100644 --- a/src/engine/sim_engine/filesystem.rs +++ b/src/engine/sim_engine/filesystem.rs @@ -36,6 +36,7 @@ pub struct SimFilesystem { size: Sectors, size_limit: Option, origin: Option, + merge_scheduled: bool, } impl SimFilesystem { @@ -57,6 +58,7 @@ impl SimFilesystem { size, size_limit, origin, + merge_scheduled: false, }) } @@ -83,9 +85,9 @@ impl SimFilesystem { } } - pub fn unset_origin(&mut self) -> bool { - let changed = self.origin.is_some(); - self.origin = None; + pub fn set_origin(&mut self, value: Option) -> bool { + let changed = self.origin != value; + self.origin = value; changed } @@ -99,6 +101,20 @@ impl SimFilesystem { origin: self.origin, } } + + /// Set the merge scheduled value for the filesystem. + pub fn set_merge_scheduled(&mut self, scheduled: bool) -> StratisResult { + if self.merge_scheduled == scheduled { + Ok(false) + } else if scheduled && self.origin.is_none() { + Err(StratisError::Msg( + "Filesystem has no origin filesystem; can not schedule a merge".into(), + )) + } else { + self.merge_scheduled = scheduled; + Ok(true) + } + } } impl Filesystem for SimFilesystem { @@ -131,6 +147,10 @@ impl Filesystem for SimFilesystem { fn origin(&self) -> Option { self.origin } + + fn merge_scheduled(&self) -> bool { + self.merge_scheduled + } } impl<'a> Into for &'a SimFilesystem { diff --git a/src/engine/sim_engine/pool.rs b/src/engine/sim_engine/pool.rs index 8676a82819..eeac131d03 100644 --- a/src/engine/sim_engine/pool.rs +++ b/src/engine/sim_engine/pool.rs @@ -125,13 +125,6 @@ impl SimPool { } } - fn filesystems_mut(&mut self) -> Vec<(Name, FilesystemUuid, &mut SimFilesystem)> { - self.filesystems - .iter_mut() - .map(|(name, uuid, x)| (name.clone(), *uuid, x)) - .collect() - } - pub fn record(&self, name: &str) -> PoolSave { PoolSave { name: name.to_owned(), @@ -488,31 +481,59 @@ impl Pool for SimPool { &mut self, _pool_name: &str, fs_uuids: &HashSet, - ) -> StratisResult> { - let mut removed = Vec::new(); - for &uuid in fs_uuids { - if self.filesystems.remove_by_uuid(uuid).is_some() { - removed.push(uuid); - } - } - - let updated_origins: Vec = self - .filesystems_mut() - .iter_mut() + ) -> StratisResult)>> + { + let mut snapshots = self + .filesystems() + .iter() .filter_map(|(_, u, fs)| { fs.origin().and_then(|x| { - if removed.contains(&x) { - if fs.unset_origin() { - Some(*u) - } else { - None - } + if fs_uuids.contains(&x) { + Some((x, (*u, fs.merge_scheduled()))) } else { None } }) }) - .collect(); + .fold(HashMap::new(), |mut acc, (u, v)| { + acc.entry(u) + .and_modify(|e: &mut Vec<_>| e.push(v)) + .or_insert(vec![v]); + acc + }); + + let scheduled_for_merge = snapshots + .iter() + .filter(|(_, snaps)| snaps.iter().any(|(_, scheduled)| *scheduled)) + .collect::>(); + if !scheduled_for_merge.is_empty() { + let err_str = format!("The filesystem destroy operation can not be begun until the revert operations for the following filesystem snapshots have been cancelled: {}", scheduled_for_merge.iter().map(|(u, _)| u.to_string()).collect::>().join(", ")); + return Err(StratisError::Msg(err_str)); + } + + let (mut removed, mut updated_origins) = (Vec::new(), Vec::new()); + for &uuid in fs_uuids { + if let Some((_, fs)) = self.get_filesystem(uuid) { + let fs_origin = fs.origin(); + self.filesystems + .remove_by_uuid(uuid) + .expect("just looked up"); + removed.push(uuid); + + for (sn_uuid, _) in snapshots.remove(&uuid).unwrap_or_else(Vec::new) { + 
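// An aside before the body of this loop: the snapshots map it drains was
// built by the fold above, keyed by the UUID of each to-be-destroyed origin.
// The entry()/fold grouping idiom in isolation, as a runnable sketch with
// plain integers standing in for UUIDs:

use std::collections::HashMap;

// Group (origin, snapshot) pairs into origin -> [snapshots].
fn group(pairs: &[(u32, u32)]) -> HashMap<u32, Vec<u32>> {
    pairs.iter().fold(HashMap::new(), |mut acc, &(origin, snap)| {
        acc.entry(origin)
            .and_modify(|v: &mut Vec<_>| v.push(snap))
            .or_insert(vec![snap]);
        acc
    })
}

fn main() {
    let grouped = group(&[(1, 10), (1, 11), (2, 20)]);
    assert_eq!(grouped[&1], vec![10, 11]);
    assert_eq!(grouped[&2], vec![20]);
}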
// The filesystems may have been removed; any one of + // them may also be a filesystem that was scheduled for + // removal. + if let Some((_, sn)) = self.filesystems.get_mut_by_uuid(sn_uuid) { + assert!( + sn.set_origin(fs_origin), + "A snapshot can only have one origin, so it can be in snapshots.values() only once, so its origin value can be set only once" + ); + updated_origins.push((sn_uuid, fs_origin)); + }; + } + } + } Ok(SetDeleteAction::new(removed, updated_origins)) } @@ -781,6 +802,80 @@ impl Pool for SimPool { // current fs metadata are, by definition, the same. self.current_fs_metadata(fs_name) } + + fn set_fs_merge_scheduled( + &mut self, + fs_uuid: FilesystemUuid, + scheduled: bool, + ) -> StratisResult> { + let (_, fs) = self + .filesystems + .get_by_uuid(fs_uuid) + .ok_or_else(|| StratisError::Msg(format!("No filesystem with UUID {fs_uuid} found")))?; + + let origin = fs.origin().ok_or_else(|| { + StratisError::Msg(format!( + "Filesystem {fs_uuid} has no origin, revert cannot be scheduled" + )) + })?; + + if fs.merge_scheduled() == scheduled { + return Ok(PropChangeAction::Identity); + } + + if scheduled { + if self + .filesystems + .get_by_uuid(origin) + .map(|(_, fs)| fs.merge_scheduled()) + .unwrap_or(false) + { + return Err(StratisError::Msg(format!( + "Filesystem {fs_uuid} is scheduled to replace filesystem {origin}, but filesystem {origin} is already scheduled to replace another filesystem. Since the order in which the filesystems should replace each other is unknown, this operation can not be performed." + ))); + } + + let (others_scheduled, into_scheduled) = + self.filesystems + .iter() + .fold((Vec::new(), Vec::new()), |mut acc, (u, n, f)| { + if f.origin().map(|o| o == origin).unwrap_or(false) && f.merge_scheduled() { + acc.0.push((u, n, f)); + } + if f.origin().map(|o| o == fs_uuid).unwrap_or(false) && f.merge_scheduled() + { + acc.1.push((u, n, f)); + } + acc + }); + + if let Some((n, u, _)) = others_scheduled.first() { + return Err(StratisError::Msg(format!( + "Filesystem {:} with UUID {:} is already scheduled to be reverted into origin filesystem {:}", + n, u, origin, + ))); + } + + if let Some((n, u, _)) = into_scheduled.first() { + return Err(StratisError::Msg(format!( + "Filesystem {:} with UUID {:} is already scheduled to be reverted into this filesystem {:}. 
The ordering is ambiguous, unwilling to schedule a revert", + n, u, origin, + ))); + } + } + + assert!( + self.filesystems + .get_mut_by_uuid(fs_uuid) + .expect("Looked up above") + .1 + .set_merge_scheduled(scheduled) + .expect("fs.origin() is not None"), + "Already returned from this method if value to set is the same as current" + ); + + Ok(PropChangeAction::NewValue(scheduled)) + } } #[cfg(test)] diff --git a/src/engine/strat_engine/pool/dispatch.rs b/src/engine/strat_engine/pool/dispatch.rs index a99db97e4b..f9c8e84254 100644 --- a/src/engine/strat_engine/pool/dispatch.rs +++ b/src/engine/strat_engine/pool/dispatch.rs @@ -123,7 +123,8 @@ impl Pool for AnyPool { &mut self, pool_name: &str, fs_uuids: &HashSet, - ) -> StratisResult> { + ) -> StratisResult)>> + { match self { AnyPool::V1(p) => p.destroy_filesystems(pool_name, fs_uuids), AnyPool::V2(p) => p.destroy_filesystems(pool_name, fs_uuids), @@ -362,4 +363,15 @@ impl Pool for AnyPool { AnyPool::V2(p) => p.last_fs_metadata(fs_name), } } + + fn set_fs_merge_scheduled( + &mut self, + fs_uuid: FilesystemUuid, + new_scheduled: bool, + ) -> StratisResult> { + match self { + AnyPool::V1(p) => p.set_fs_merge_scheduled(fs_uuid, new_scheduled), + AnyPool::V2(p) => p.set_fs_merge_scheduled(fs_uuid, new_scheduled), + } + } } diff --git a/src/engine/strat_engine/pool/v1.rs b/src/engine/strat_engine/pool/v1.rs index d90e268d2c..984e1c33a4 100644 --- a/src/engine/strat_engine/pool/v1.rs +++ b/src/engine/strat_engine/pool/v1.rs @@ -1022,37 +1022,9 @@ impl Pool for StratPool { &mut self, pool_name: &str, fs_uuids: &HashSet, - ) -> StratisResult> { - let mut removed = Vec::new(); - for &uuid in fs_uuids { - if let Some(uuid) = self.thin_pool.destroy_filesystem(pool_name, uuid)? { - removed.push(uuid); - } - } - - let snapshots: Vec = self - .thin_pool - .filesystems() - .iter() - .filter_map(|(_, u, fs)| { - fs.origin() - .and_then(|x| if removed.contains(&x) { Some(*u) } else { None }) - }) - .collect(); - - let mut updated_origins = vec![]; - for sn_uuid in snapshots { - if let Err(err) = self.thin_pool.unset_fs_origin(sn_uuid) { - warn!( - "Failed to write null origin to metadata for filesystem with UUID {}: {}", - sn_uuid, err - ); - } else { - updated_origins.push(sn_uuid); - } - } - - Ok(SetDeleteAction::new(removed, updated_origins)) + ) -> StratisResult)>> + { + self.thin_pool.destroy_filesystems(pool_name, fs_uuids) } #[pool_mutating_action("NoRequests")] @@ -1306,6 +1278,22 @@ impl Pool for StratPool { fn last_fs_metadata(&self, fs_name: Option<&str>) -> StratisResult { self.thin_pool.last_fs_metadata(fs_name) } + + #[pool_mutating_action("NoRequests")] + fn set_fs_merge_scheduled( + &mut self, + fs_uuid: FilesystemUuid, + new_scheduled: bool, + ) -> StratisResult> { + if self + .thin_pool + .set_fs_merge_scheduled(fs_uuid, new_scheduled)? + { + Ok(PropChangeAction::NewValue(new_scheduled)) + } else { + Ok(PropChangeAction::Identity) + } + } } pub struct StratPoolState { diff --git a/src/engine/strat_engine/pool/v2.rs b/src/engine/strat_engine/pool/v2.rs index 9406df6f16..d0990011b4 100644 --- a/src/engine/strat_engine/pool/v2.rs +++ b/src/engine/strat_engine/pool/v2.rs @@ -921,37 +921,9 @@ impl Pool for StratPool { &mut self, pool_name: &str, fs_uuids: &HashSet, - ) -> StratisResult> { - let mut removed = Vec::new(); - for &uuid in fs_uuids { - if let Some(uuid) = self.thin_pool.destroy_filesystem(pool_name, uuid)? 
{ - removed.push(uuid); - } - } - - let snapshots: Vec = self - .thin_pool - .filesystems() - .iter() - .filter_map(|(_, u, fs)| { - fs.origin() - .and_then(|x| if removed.contains(&x) { Some(*u) } else { None }) - }) - .collect(); - - let mut updated_origins = vec![]; - for sn_uuid in snapshots { - if let Err(err) = self.thin_pool.unset_fs_origin(sn_uuid) { - warn!( - "Failed to write null origin to metadata for filesystem with UUID {}: {}", - sn_uuid, err - ); - } else { - updated_origins.push(sn_uuid); - } - } - - Ok(SetDeleteAction::new(removed, updated_origins)) + ) -> StratisResult)>> + { + self.thin_pool.destroy_filesystems(pool_name, fs_uuids) } #[pool_mutating_action("NoRequests")] @@ -1206,6 +1178,22 @@ impl Pool for StratPool { fn last_fs_metadata(&self, fs_name: Option<&str>) -> StratisResult { self.thin_pool.last_fs_metadata(fs_name) } + + #[pool_mutating_action("NoRequests")] + fn set_fs_merge_scheduled( + &mut self, + fs_uuid: FilesystemUuid, + new_scheduled: bool, + ) -> StratisResult> { + if self + .thin_pool + .set_fs_merge_scheduled(fs_uuid, new_scheduled)? + { + Ok(PropChangeAction::NewValue(new_scheduled)) + } else { + Ok(PropChangeAction::Identity) + } + } } pub struct StratPoolState { diff --git a/src/engine/strat_engine/serde_structs.rs b/src/engine/strat_engine/serde_structs.rs index aacdb5fa0a..63c9e69f03 100644 --- a/src/engine/strat_engine/serde_structs.rs +++ b/src/engine/strat_engine/serde_structs.rs @@ -196,6 +196,13 @@ pub struct FilesystemSave { pub fs_size_limit: Option, #[serde(skip_serializing_if = "Option::is_none")] pub origin: Option, + // if self.origin is None, then self.merge must be None + // if self.origin is Some, + // then self.merge is None is equivalent to self.merge is Some(false) + // this definition is fully backward compatible. + // TODO: This data type should no longer be optional in Stratis 4.0 + #[serde(skip_serializing_if = "Option::is_none")] + pub merge: Option, } #[cfg(test)] diff --git a/src/engine/strat_engine/shared.rs b/src/engine/strat_engine/shared.rs index 8a11e7e240..8dcce30de3 100644 --- a/src/engine/strat_engine/shared.rs +++ b/src/engine/strat_engine/shared.rs @@ -5,7 +5,9 @@ use std::collections::HashMap; use crate::engine::{ - strat_engine::{backstore::blockdev::InternalBlockDev, metadata::BDA}, + strat_engine::{ + backstore::blockdev::InternalBlockDev, metadata::BDA, serde_structs::FilesystemSave, + }, types::DevUuid, }; @@ -38,3 +40,18 @@ where .chain(bda.map(|bda| (bda.dev_uuid(), bda))) .collect::>() } + +/// Define how an origin and its snapshot are merged when a filesystem is +/// reverted. 
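On the new merge field in FilesystemSave above: a self-contained sketch, assuming serde and serde_json as dependencies, of why Option<bool> plus skip_serializing_if keeps old and new pool metadata mutually readable, which is the backward-compatibility claim made in the comment:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct FsSave {
    name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    merge: Option<bool>,
}

fn main() -> Result<(), serde_json::Error> {
    // Metadata written before this change has no "merge" key at all; it
    // deserializes to None, which the daemon reads as "no merge scheduled".
    let old: FsSave = serde_json::from_str(r#"{"name":"fs1"}"#)?;
    assert!(!old.merge.unwrap_or(false));

    // New code omits the key again whenever the value is None, so metadata
    // it writes stays readable by older stratisd versions.
    let new = FsSave { name: "fs1".to_owned(), merge: None };
    assert_eq!(serde_json::to_string(&new)?, r#"{"name":"fs1"}"#);
    Ok(())
}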
+pub fn merge(origin: &FilesystemSave, snap: &FilesystemSave) -> FilesystemSave { + FilesystemSave { + name: origin.name.to_owned(), + uuid: origin.uuid, + thin_id: snap.thin_id, + size: snap.size, + created: origin.created, + fs_size_limit: snap.fs_size_limit, + origin: origin.origin, + merge: origin.merge, + } +} diff --git a/src/engine/strat_engine/thinpool/filesystem.rs b/src/engine/strat_engine/thinpool/filesystem.rs index ba3748a507..0f971e0403 100644 --- a/src/engine/strat_engine/thinpool/filesystem.rs +++ b/src/engine/strat_engine/thinpool/filesystem.rs @@ -52,6 +52,7 @@ pub struct StratFilesystem { used: Option, size_limit: Option, origin: Option, + merge_scheduled: bool, } fn init_used(thin_dev: &ThinDev) -> Option { @@ -116,6 +117,7 @@ impl StratFilesystem { created: Utc::now(), size_limit, origin: None, + merge_scheduled: false, }, )) } @@ -143,6 +145,7 @@ impl StratFilesystem { created, size_limit: fssave.fs_size_limit, origin: fssave.origin, + merge_scheduled: fssave.merge.unwrap_or_default(), }) } @@ -250,6 +253,7 @@ impl StratFilesystem { created: Utc::now(), size_limit: self.size_limit, origin: Some(origin_uuid), + merge_scheduled: false, }) } Err(e) => Err(StratisError::Msg(format!( @@ -390,6 +394,7 @@ impl StratFilesystem { created: self.created.timestamp() as u64, fs_size_limit: self.size_limit, origin: self.origin, + merge: self.origin.map(|_| self.merge_scheduled), } } @@ -445,11 +450,29 @@ impl StratFilesystem { self.thin_dev.size() } - pub fn unset_origin(&mut self) -> bool { - let changed = self.origin.is_some(); - self.origin = None; + pub fn set_origin(&mut self, value: Option) -> bool { + let changed = self.origin != value; + self.origin = value; changed } + + /// Set the merge scheduled value for the filesystem. + pub fn set_merge_scheduled(&mut self, scheduled: bool) -> StratisResult { + if self.merge_scheduled == scheduled { + Ok(false) + } else if scheduled && self.origin.is_none() { + Err(StratisError::Msg( + "Filesystem has no origin filesystem; can not schedule a merge".into(), + )) + } else { + self.merge_scheduled = scheduled; + Ok(true) + } + } + + pub fn thin_id(&self) -> ThinDevId { + self.thin_dev.id() + } } impl Filesystem for StratFilesystem { @@ -493,6 +516,10 @@ impl Filesystem for StratFilesystem { fn origin(&self) -> Option { self.origin } + + fn merge_scheduled(&self) -> bool { + self.merge_scheduled + } } /// Represents the state of the Stratis filesystem at a given moment in time. 
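The fs_save() change above (merge: self.origin.map(|_| self.merge_scheduled)) encodes the serde_structs.rs invariant, merge is None exactly when origin is None, in one expression. A distilled, runnable form:

// merge is persisted only for filesystems that actually have an origin.
fn encode_merge(origin: Option<u32>, merge_scheduled: bool) -> Option<bool> {
    origin.map(|_| merge_scheduled)
}

fn main() {
    assert_eq!(encode_merge(None, true), None); // never Some without an origin
    assert_eq!(encode_merge(Some(7), true), Some(true));
    assert_eq!(encode_merge(Some(7), false), Some(false));
}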
diff --git a/src/engine/strat_engine/thinpool/thinpool.rs b/src/engine/strat_engine/thinpool/thinpool.rs index 5c380d6dad..f7b376ebc4 100644 --- a/src/engine/strat_engine/thinpool/thinpool.rs +++ b/src/engine/strat_engine/thinpool/thinpool.rs @@ -17,28 +17,32 @@ use retry::{delay::Fixed, retry_with_index}; use serde_json::{Map, Value}; use devicemapper::{ - device_exists, Bytes, DataBlocks, Device, DmDevice, DmName, DmNameBuf, DmOptions, + device_exists, message, Bytes, DataBlocks, Device, DmDevice, DmName, DmNameBuf, DmOptions, FlakeyTargetParams, LinearDev, LinearDevTargetParams, LinearTargetParams, MetaBlocks, Sectors, TargetLine, ThinDevId, ThinPoolDev, ThinPoolStatus, ThinPoolStatusSummary, ThinPoolUsage, IEC, }; use crate::{ engine::{ - engine::{DumpState, StateDiff}, + engine::{DumpState, Filesystem, StateDiff}, strat_engine::{ backstore::backstore::{v1, v2, InternalBackstore}, - cmd::{thin_check, thin_metadata_size, thin_repair}, + cmd::{set_uuid, thin_check, thin_metadata_size, thin_repair}, dm::{get_dm, list_of_thin_pool_devices, remove_optional_devices}, names::{ format_flex_ids, format_thin_ids, format_thinpool_ids, FlexRole, ThinPoolRole, ThinRole, }, serde_structs::{FlexDevsSave, Recordable, ThinPoolDevSave}, + shared::merge, thinpool::{filesystem::StratFilesystem, mdv::MetadataVol, thinids::ThinDevIdPool}, writing::wipe_sectors, }, structures::Table, - types::{Compare, Diff, FilesystemUuid, Name, PoolUuid, StratFilesystemDiff, ThinPoolDiff}, + types::{ + Compare, Diff, FilesystemUuid, Name, PoolUuid, SetDeleteAction, StratFilesystemDiff, + ThinPoolDiff, + }, }, stratis::{StratisError, StratisResult}, }; @@ -604,7 +608,7 @@ impl ThinPool { /// * Ok(Some(uuid)) provides the uuid of the destroyed filesystem /// * Ok(None) is returned if the filesystem did not exist /// * Err(_) is returned if the filesystem could not be destroyed - pub fn destroy_filesystem( + fn destroy_filesystem( &mut self, pool_name: &str, uuid: FilesystemUuid, @@ -766,6 +770,84 @@ impl ThinPool { ) .map_err(|e| e.into()) } + + pub fn destroy_filesystems( + &mut self, + pool_name: &str, + fs_uuids: &HashSet, + ) -> StratisResult)>> + { + let to_be_merged = fs_uuids + .iter() + .filter(|u| { + self.get_filesystem_by_uuid(**u) + .map(|(_, fs)| fs.merge_scheduled()) + .unwrap_or(false) + }) + .collect::>(); + + if !to_be_merged.is_empty() { + let err_str = format!("The filesystem destroy operation can not be begun until the revert operations for the following filesystem snapshots have been cancelled: {}", to_be_merged.iter().map(|u| u.to_string()).collect::>().join(", ")); + return Err(StratisError::Msg(err_str)); + } + + let mut snapshots = self + .filesystems() + .iter() + .filter_map(|(_, u, fs)| { + fs.origin().and_then(|x| { + if fs_uuids.contains(&x) { + Some((x, (*u, fs.merge_scheduled()))) + } else { + None + } + }) + }) + .fold(HashMap::new(), |mut acc, (u, v)| { + acc.entry(u) + .and_modify(|e: &mut Vec<(FilesystemUuid, _)>| e.push(v)) + .or_insert(vec![v]); + acc + }); + + let scheduled_for_merge = snapshots + .iter() + .filter(|(_, snaps)| snaps.iter().any(|(_, scheduled)| *scheduled)) + .collect::>(); + if !scheduled_for_merge.is_empty() { + let err_str = format!("The filesystem destroy operation can not be begun until the revert operations for the following filesystem snapshots have been cancelled: {}", scheduled_for_merge.iter().map(|(u, _)| u.to_string()).collect::>().join(", ")); + return Err(StratisError::Msg(err_str)); + } + + let (mut removed, mut updated_origins) = (Vec::new(), 
Vec::new()); + for &uuid in fs_uuids { + if let Some((_, fs)) = self.get_filesystem_by_uuid(uuid) { + let fs_origin = fs.origin(); + let uuid = self + .destroy_filesystem(pool_name, uuid)? + .expect("just looked up"); + removed.push(uuid); + + for (sn_uuid, _) in snapshots.remove(&uuid).unwrap_or_else(Vec::new) { + // The filesystems may have been removed; any one of + // them may also be a filesystem that was scheduled for + // removal. + if let Some((_, sn)) = self.get_mut_filesystem_by_uuid(sn_uuid) { + assert!( + sn.set_origin(fs_origin), + "A snapshot can only have one origin, so it can be in snapshots.values() only once, so its origin value can be set only once" + ); + updated_origins.push((sn_uuid, fs_origin)); + + let (name, sn) = self.get_filesystem_by_uuid(sn_uuid).expect("just got"); + self.mdv.save_fs(&name, sn_uuid, sn)?; + }; + } + } + } + + Ok(SetDeleteAction::new(removed, updated_origins)) + } } impl ThinPool { @@ -1102,6 +1184,101 @@ where let backstore_device = backstore.device().expect("When stratisd was running previously, space was allocated from the backstore, so backstore must have a cap device"); + let (dm_name, dm_uuid) = format_flex_ids(pool_uuid, FlexRole::MetadataVolume); + let mdv_dev = LinearDev::setup( + get_dm(), + &dm_name, + Some(&dm_uuid), + segs_to_table(backstore_device, &mdv_segments), + )?; + let mdv = MetadataVol::setup(pool_uuid, mdv_dev)?; + + let mut filesystem_metadata_map = HashMap::new(); + let mut names = HashSet::new(); + for fs in mdv.filesystems()?.drain(..) { + if !names.insert(fs.name.to_string()) { + return Err(StratisError::Msg(format!( + "Two filesystems with the same name, {:}, found in filesystem metadata", + fs.name + ))); + } + + let fs_uuid = *fs.uuid; + if filesystem_metadata_map.insert(fs_uuid, fs).is_some() { + return Err(StratisError::Msg(format!( + "Two filesystems with the same UUID, {:}, found in filesystem metadata", + fs_uuid + ))); + } + } + + let (duplicates_scheduled, mut ready_to_merge, origins, snaps_to_merge) = filesystem_metadata_map + .values() + .filter(|md| md.merge.unwrap_or(false)) + .fold(HashMap::new(), |mut acc, md| { + match md.origin { + None => { + warn!("Filesystem with UUID {:} and name {:} which has no origin has been scheduled to be merged; this makes no sense.", md.uuid, md.name); + } + Some(origin) => { + acc.entry(origin) + .and_modify(|e: &mut Vec<_>| e.push(md)) + .or_insert(vec![md]); + } + }; + acc + }).drain() + .fold((HashMap::new(), HashMap::new(), HashSet::new(), HashSet::new()), |mut acc, (origin, ss)| { + if ss.len() > 1 { + acc.0.insert(origin, ss); + } else { + let snap = ss[0].uuid; + acc.1.insert(origin, snap); + acc.2.insert(origin); + acc.3.insert(snap); + }; + acc + }); + + if !duplicates_scheduled.is_empty() { + let msg_string = duplicates_scheduled + .iter() + .map(|(origin, ss)| { + format!( + "{:} -> {:}", + ss.iter() + .map(|s| s.uuid.to_string()) + .collect::>() + .join(", "), + origin + ) + }) + .collect::>() + .join("; "); + warn!("Ambiguous revert request; at least two snapshots scheduled to be reverted into a single origin. The scheduled reverts will not occur. Snapshots: {:}", msg_string); + } + + let links = origins + .intersection(&snaps_to_merge) + .collect::>(); + let ready_to_merge = ready_to_merge + .drain() + .filter(|(origin, snap)| { + if links.contains(origin) || links.contains(snap) { + warn!("A chain of reverts that includes {:} and {:} has been scheduled. 
The intended order of reverts is ambiguous, the scheduled revert will not occur.", origin, snap); + false + } else { + true + } + }) + .map(|(origin, snap)| { + ( + filesystem_metadata_map.remove(&origin).expect("origin and snap sets are disjoint, have no duplicates"), + filesystem_metadata_map.remove(&snap).expect("origin and snap sets are disjoint, have no duplicates") + ) + }) + .collect::>(); + let (thinpool_name, thinpool_uuid) = format_thinpool_ids(pool_uuid, ThinPoolRole::Pool); let (meta_dev, meta_segments, spare_segments) = setup_metadev( pool_uuid, @@ -1153,46 +1330,86 @@ where thinpool_dev.queue_if_no_space(get_dm())?; } - let (dm_name, dm_uuid) = format_flex_ids(pool_uuid, FlexRole::MetadataVolume); - let mdv_dev = LinearDev::setup( - get_dm(), - &dm_name, - Some(&dm_uuid), - segs_to_table(backstore_device, &mdv_segments), - )?; - let mdv = MetadataVol::setup(pool_uuid, mdv_dev)?; - let filesystem_metadatas = mdv.filesystems()?; - - let filesystems = filesystem_metadatas - .iter() - .filter_map( - |fssave| match StratFilesystem::setup(pool_uuid, &thinpool_dev, fssave) { - Ok(fs) => { - fs.udev_fs_change(pool_name, fssave.uuid, &fssave.name); - Some((Name::new(fssave.name.to_owned()), fssave.uuid, fs)) - }, - Err(err) => { + let mut fs_table = Table::default(); + for (origin, snap) in ready_to_merge { + assert!(!origin.merge.unwrap_or(false)); + let merged = merge(&origin, &snap); + + match StratFilesystem::setup(pool_uuid, &thinpool_dev, &merged) { + Ok(fs) => { + if let Err(e) = set_uuid(&fs.devnode(), merged.uuid) { + error!( + "Could not set the UUID of the XFS filesystem on the Stratis filesystem with UUID {} after revert, reason: {:?}", + merged.uuid, e + ); + }; + fs.udev_fs_change(pool_name, merged.uuid, &merged.name); + + let name = Name::new(merged.name.to_owned()); + if let Err(e) = mdv.save_fs(&name, merged.uuid, &fs) { + error!( + "Could not save MDV for fs with UUID {} and name {} belonging to pool with UUID {} after revert, reason: {:?}", + merged.uuid, merged.name, pool_uuid, e + ); + } + if let Err(e) = mdv.rm_fs(snap.uuid) { + error!( + "Could not remove old MDV for fs with UUID {} belonging to pool with UUID {} after revert, reason: {:?}", + snap.uuid, pool_uuid, e + ); + }; + assert!( + fs_table.insert(name, merged.uuid, fs).is_none(), + "Duplicates already removed when building filesystem_metadata_map" + ); + if let Err(e) = message( + get_dm(), + &thinpool_dev, + &format!("delete {:}", origin.thin_id), + ) { warn!( - "Filesystem specified by metadata {:?} could not be setup, reason: {:?}", - fssave, - err + "Failed to delete space allocated for deleted origin filesystem with UUID {:} and thin id {:}: {:?} after revert", + origin.uuid, origin.thin_id, e ); - None } - }, - ) - .collect::>(); + for fs in filesystem_metadata_map.values_mut() { + if fs.origin.map(|o| o == snap.uuid).unwrap_or(false) { + fs.origin = Some(origin.uuid); + } + } + } + Err(err) => { + warn!( + "Snapshot {:?} could not be reverted into origin {:?}, reason: {:?}", + snap, origin, err + ); + filesystem_metadata_map.insert(*origin.uuid, origin); + filesystem_metadata_map.insert(*snap.uuid, snap); + } + } + } - let mut fs_table = Table::default(); - for (name, uuid, fs) in filesystems { - let evicted = fs_table.insert(name, uuid, fs); - if evicted.is_some() { - let err_msg = "filesystems with duplicate UUID or name specified in metadata"; - return Err(StratisError::Msg(err_msg.into())); + for fssave in filesystem_metadata_map.values() { + match StratFilesystem::setup(pool_uuid, 
&thinpool_dev, fssave) { + Ok(fs) => { + fs.udev_fs_change(pool_name, fssave.uuid, &fssave.name); + assert!( + fs_table + .insert(Name::new(fssave.name.to_owned()), fssave.uuid, fs) + .is_none(), + "Duplicates already removed when building filesystem_metadata_map" + ); + } + Err(err) => { + warn!( + "Filesystem specified by metadata {:?} could not be setup, reason: {:?}", + fssave, err + ); + } } } - let thin_ids: Vec = filesystem_metadatas.iter().map(|x| x.thin_id).collect(); + let thin_ids: Vec = fs_table.iter().map(|(_, _, fs)| fs.thin_id()).collect(); let thin_pool_status = thinpool_dev.status(get_dm(), DmOptions::default()).ok(); let segments = Segments { meta_segments, @@ -1738,20 +1955,83 @@ where Ok(changed) } - pub fn unset_fs_origin(&mut self, fs_uuid: FilesystemUuid) -> StratisResult { - let changed = { - let (_, fs) = self.get_mut_filesystem_by_uuid(fs_uuid).ok_or_else(|| { - StratisError::Msg(format!("No filesystem with UUID {fs_uuid} found")) - })?; - fs.unset_origin() - }; - if changed { - let (name, fs) = self - .get_filesystem_by_uuid(fs_uuid) - .expect("Looked up above."); - self.mdv.save_fs(&name, fs_uuid, fs)?; + /// Set the filesystem merge scheduled value for filesystem with given UUID. + /// Returns true if the value was changed from the filesystem's, previous + /// value, otherwise false. + pub fn set_fs_merge_scheduled( + &mut self, + fs_uuid: FilesystemUuid, + scheduled: bool, + ) -> StratisResult { + let (_, fs) = self + .get_filesystem_by_uuid(fs_uuid) + .ok_or_else(|| StratisError::Msg(format!("No filesystem with UUID {fs_uuid} found")))?; + + let origin = fs.origin().ok_or_else(|| { + StratisError::Msg(format!( + "Filesystem {fs_uuid} has no origin, revert cannot be scheduled or unscheduled" + )) + })?; + + if fs.merge_scheduled() == scheduled { + return Ok(false); } - Ok(changed) + + if scheduled { + if self + .get_filesystem_by_uuid(origin) + .map(|(_, fs)| fs.merge_scheduled()) + .unwrap_or(false) + { + return Err(StratisError::Msg(format!( + "Filesystem {fs_uuid} is scheduled to replace filesystem {origin}, but filesystem {origin} is already scheduled to replace another filesystem. Since the order in which the filesystems should replace each other is unknown, this operation can not be performed." + ))); + } + + let (others_scheduled, into_scheduled) = + self.filesystems + .iter() + .fold((Vec::new(), Vec::new()), |mut acc, (u, n, f)| { + if f.origin().map(|o| o == origin).unwrap_or(false) && f.merge_scheduled() { + acc.0.push((u, n, f)); + } + if f.origin().map(|o| o == fs_uuid).unwrap_or(false) && f.merge_scheduled() + { + acc.1.push((u, n, f)); + } + acc + }); + + if let Some((n, u, _)) = others_scheduled.first() { + return Err(StratisError::Msg(format!( + "Filesystem {:} with UUID {:} is already scheduled to be reverted into origin filesystem {:}, unwilling to schedule two revert operations on the same origin filesystem.", + n, u, origin, + ))); + } + + if let Some((n, u, _)) = into_scheduled.first() { + return Err(StratisError::Msg(format!( + "Filesystem {:} with UUID {:} is already scheduled to be reverted into this filesystem {:}. 
The ordering is ambiguous, unwilling to schedule a revert", + n, u, origin, + ))); + } + } + + assert!( + self.get_mut_filesystem_by_uuid(fs_uuid) + .expect("Looked up above") + .1 + .set_merge_scheduled(scheduled) + .expect("fs.origin() is not None"), + "Already returned from this method if value to set is the same as current" + ); + + let (name, fs) = self + .get_filesystem_by_uuid(fs_uuid) + .expect("Looked up above"); + + self.mdv.save_fs(&name, fs_uuid, fs)?; + Ok(true) } } @@ -2886,6 +3166,85 @@ mod tests { test_fs_size_limit, ); } + + /// Verify that destroy_filesystems handles origin and merge + /// scheduled properties correctly when destroying filesystems. + fn test_thindev_with_origins(paths: &[&Path]) { + let default_thin_dev_size = Sectors(2 * IEC::Mi); + let pool_uuid = PoolUuid::new_v4(); + + let devices = get_devices(paths).unwrap(); + + let mut backstore = backstore::v2::Backstore::initialize( + pool_uuid, + devices, + MDADataSize::default(), + None, + ) + .unwrap(); + let mut pool = ThinPool::::new( + pool_uuid, + &ThinPoolSizeParams::new(backstore.available_in_backstore()).unwrap(), + DATA_BLOCK_SIZE, + &mut backstore, + ) + .unwrap(); + let pool_name = "stratis_test_pool"; + let fs_name = "stratis_test_filesystem"; + let fs_uuid = pool + .create_filesystem(pool_name, pool_uuid, fs_name, default_thin_dev_size, None) + .unwrap(); + + let (sn1_uuid, sn1) = pool + .snapshot_filesystem(pool_name, pool_uuid, fs_uuid, "snap1") + .unwrap(); + assert_matches!(sn1.origin(), Some(uuid) => fs_uuid == uuid); + + let (sn2_uuid, sn2) = pool + .snapshot_filesystem(pool_name, pool_uuid, fs_uuid, "snap2") + .unwrap(); + assert_matches!(sn2.origin(), Some(uuid) => fs_uuid == uuid); + + assert!(pool.set_fs_merge_scheduled(fs_uuid, true).is_err()); + assert!(pool.set_fs_merge_scheduled(fs_uuid, false).is_err()); + pool.set_fs_merge_scheduled(sn2_uuid, true).unwrap(); + assert!(pool.set_fs_merge_scheduled(sn1_uuid, true).is_err()); + pool.set_fs_merge_scheduled(sn1_uuid, false).unwrap(); + + assert!(pool + .destroy_filesystems(pool_name, &[sn2_uuid, sn1_uuid].into()) + .is_err()); + assert!(pool + .destroy_filesystems(pool_name, &[fs_uuid, sn1_uuid].into()) + .is_err()); + pool.set_fs_merge_scheduled(sn2_uuid, false).unwrap(); + + retry_operation!(pool.destroy_filesystems(pool_name, &[fs_uuid, sn2_uuid].into())); + + assert_eq!( + pool.get_filesystem_by_uuid(sn1_uuid) + .expect("not destroyed") + .1 + .origin(), + None + ); + } + + #[test] + fn loop_test_thindev_with_origins() { + loopbacked::test_with_spec( + &loopbacked::DeviceLimits::Range(2, 3, None), + test_thindev_with_origins, + ); + } + + #[test] + fn real_test_thindev_with_origins() { + real::test_with_spec( + &real::DeviceLimits::AtLeast(1, None, None), + test_thindev_with_origins, + ); + } } mod v2 { @@ -3779,5 +4138,92 @@ mod tests { test_fs_size_limit, ); } + + /// Verify that destroy_filesystems handles origin and merge + /// scheduled properties correctly when destroying filesystems. 
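The scheduling rules the test below exercises reduce to: a revert needs an origin, and no two scheduled reverts may touch the same filesystem from either side. A runnable distillation of the checks in set_fs_merge_scheduled, with simplified types rather than the engine's actual signatures:

struct Fs {
    uuid: u32,
    origin: Option<u32>,
    scheduled: bool,
}

fn may_schedule(all: &[Fs], candidate: &Fs) -> Result<(), String> {
    let origin = candidate.origin.ok_or("no origin: revert cannot be scheduled")?;
    for other in all {
        if !other.scheduled {
            continue;
        }
        if other.uuid == origin {
            return Err("origin is itself scheduled to replace another filesystem".into());
        }
        if other.origin == Some(origin) {
            return Err("another snapshot is already scheduled into this origin".into());
        }
        if other.origin == Some(candidate.uuid) {
            return Err("a snapshot is already scheduled into this filesystem".into());
        }
    }
    Ok(())
}

fn main() {
    let fs = Fs { uuid: 1, origin: None, scheduled: false };
    let sn1 = Fs { uuid: 2, origin: Some(1), scheduled: false };
    let sn2 = Fs { uuid: 3, origin: Some(1), scheduled: true };
    let all = [fs, sn2];
    // Like snap1 in the test: snap2 is already scheduled into the shared origin.
    assert!(may_schedule(&all, &sn1).is_err());
}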
+ fn test_thindev_with_origins(paths: &[&Path]) { + let default_thin_dev_size = Sectors(2 * IEC::Mi); + let pool_uuid = PoolUuid::new_v4(); + + let devices = get_devices(paths).unwrap(); + + let mut backstore = backstore::v2::Backstore::initialize( + pool_uuid, + devices, + MDADataSize::default(), + None, + ) + .unwrap(); + let mut pool = ThinPool::::new( + pool_uuid, + &ThinPoolSizeParams::new(backstore.available_in_backstore()).unwrap(), + DATA_BLOCK_SIZE, + &mut backstore, + ) + .unwrap(); + let pool_name = "stratis_test_pool"; + let fs_name = "stratis_test_filesystem"; + let fs_uuid = pool + .create_filesystem(pool_name, pool_uuid, fs_name, default_thin_dev_size, None) + .unwrap(); + + let (sn1_uuid, sn1) = pool + .snapshot_filesystem(pool_name, pool_uuid, fs_uuid, "snap1") + .unwrap(); + assert_matches!(sn1.origin(), Some(uuid) => fs_uuid == uuid); + + let (sn2_uuid, sn2) = pool + .snapshot_filesystem(pool_name, pool_uuid, fs_uuid, "snap2") + .unwrap(); + assert_matches!(sn2.origin(), Some(uuid) => fs_uuid == uuid); + + assert!(pool.set_fs_merge_scheduled(fs_uuid, true).is_err()); + assert!(pool.set_fs_merge_scheduled(fs_uuid, false).is_err()); + pool.set_fs_merge_scheduled(sn2_uuid, true).unwrap(); + assert!(pool.set_fs_merge_scheduled(sn1_uuid, true).is_err()); + pool.set_fs_merge_scheduled(sn1_uuid, false).unwrap(); + + assert!(pool + .destroy_filesystems(pool_name, &[sn2_uuid, sn1_uuid].into()) + .is_err()); + assert!(pool + .destroy_filesystems(pool_name, &[fs_uuid, sn1_uuid].into()) + .is_err()); + + let (sn3_uuid, sn3) = pool + .snapshot_filesystem(pool_name, pool_uuid, sn2_uuid, "snap3") + .unwrap(); + assert_matches!(sn3.origin(), Some(uuid) => sn2_uuid == uuid); + + assert!(pool.set_fs_merge_scheduled(sn3_uuid, true).is_err()); + + pool.set_fs_merge_scheduled(sn2_uuid, false).unwrap(); + + retry_operation!(pool.destroy_filesystems(pool_name, &[fs_uuid, sn2_uuid].into())); + + assert_eq!( + pool.get_filesystem_by_uuid(sn1_uuid) + .expect("not destroyed") + .1 + .origin(), + None + ); + } + + #[test] + fn loop_test_thindev_with_origins() { + loopbacked::test_with_spec( + &loopbacked::DeviceLimits::Range(2, 3, None), + test_thindev_with_origins, + ); + } + + #[test] + fn real_test_thindev_with_origins() { + real::test_with_spec( + &real::DeviceLimits::AtLeast(1, None, None), + test_thindev_with_origins, + ); + } } } diff --git a/src/engine/types/actions.rs b/src/engine/types/actions.rs index ca8088404a..dbd45cfcf2 100644 --- a/src/engine/types/actions.rs +++ b/src/engine/types/actions.rs @@ -612,7 +612,7 @@ impl EngineAction for SetDeleteAction { } } -impl Display for SetDeleteAction { +impl Display for SetDeleteAction)> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.changed.is_empty() { write!( @@ -796,6 +796,19 @@ where } } +impl ToDisplay for PropChangeAction { + type Display = PropChangeAction; + + fn to_display(&self) -> PropChangeAction { + match self { + PropChangeAction::Identity => PropChangeAction::Identity, + PropChangeAction::NewValue(v) => { + PropChangeAction::NewValue(format!("a value of {}", v)) + } + } + } +} + impl Display for PropChangeAction where T: Display, diff --git a/tests-fmf/python.fmf b/tests-fmf/python.fmf index 52c8237fbd..0d2e7ea6a6 100644 --- a/tests-fmf/python.fmf +++ b/tests-fmf/python.fmf @@ -8,6 +8,7 @@ require: - python3-dbus - python3-dbus-client-gen - python3-dbus-python-client-gen + - python3-justbytes - python3-psutil - python3-pyudev - python3-tenacity @@ -29,7 +30,7 @@ environment: /legacy/loop: summary: Run 
Python tests that use loopbacked device framework
-  test: make -f Makefile tang-tests dump-metadata-tests startup-tests
+  test: make -f Makefile tang-tests dump-metadata-tests startup-tests revert-tests
 
 /v2/udev:
   summary: Run Python udev tests
@@ -37,4 +38,4 @@
 /v2/loop:
   summary: Run Python tests that use loopbacked device framework
-  test: make -f Makefile tang-tests dump-metadata-tests startup-tests
+  test: make -f Makefile tang-tests dump-metadata-tests startup-tests revert-tests
diff --git a/tests/client-dbus/Makefile b/tests/client-dbus/Makefile
index db20f1460b..8a9cddbf7b 100644
--- a/tests/client-dbus/Makefile
+++ b/tests/client-dbus/Makefile
@@ -38,3 +38,7 @@ filesystem-predict-tests:
 .PHONY: dump-metadata-tests
 dump-metadata-tests:
 	python3 -m unittest ${UNITTEST_OPTS} tests.udev.test_dump
+
+.PHONY: revert-tests
+revert-tests:
+	python3 -m unittest ${UNITTEST_OPTS} tests.udev.test_revert
diff --git a/tests/client-dbus/src/stratisd_client_dbus/_introspect.py b/tests/client-dbus/src/stratisd_client_dbus/_introspect.py
index 973c53a2db..f518c26857 100644
--- a/tests/client-dbus/src/stratisd_client_dbus/_introspect.py
+++ b/tests/client-dbus/src/stratisd_client_dbus/_introspect.py
@@ -124,6 +124,7 @@
+        <property access="readwrite" name="MergeScheduled" type="b" />
diff --git a/tests/client-dbus/tests/udev/test_revert.py b/tests/client-dbus/tests/udev/test_revert.py
new file mode 100644
index 0000000000..db010b5cd2
--- /dev/null
+++ b/tests/client-dbus/tests/udev/test_revert.py
@@ -0,0 +1,296 @@
+# Copyright 2024 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Test reverting a filesystem.
+"""
+
+# isort: STDLIB
+import os
+import subprocess
+import tempfile
+
+# isort: THIRDPARTY
+from justbytes import Range
+
+# isort: FIRSTPARTY
+from dbus_python_client_gen import DPClientInvocationError
+
+# isort: LOCAL
+from stratisd_client_dbus import Filesystem
+from stratisd_client_dbus._constants import TOP_OBJECT
+
+from ._utils import (
+    Manager,
+    Pool,
+    ServiceContextManager,
+    UdevTest,
+    create_pool,
+    get_object,
+    random_string,
+    settle,
+)
+
+
+def write_file(mountdir, filename):
+    """
+    Write a sentinel value derived from the filename to a file.
+    """
+    with open(os.path.join(mountdir, filename), encoding="utf-8", mode="w") as fd:
+        print(filename, file=fd, end="")
+
+
+class TestRevert(UdevTest):
+    """
+    Test reverting a filesystem.
+    """
+
+    def read_file(self, mountdir, filename):
+        """
+        Read a file and verify that it contains the expected sentinel value.
+        """
+        with open(os.path.join(mountdir, filename), encoding="utf-8") as fd:
+            self.assertEqual(fd.read(), filename)
+
+    def test_revert(self):  # pylint: disable=too-many-locals
+        """
+        Schedule a revert and verify that it has succeeded when the pool is
+        restarted.
+
+        First simply stop and start stratisd. In this way it is possible to
+        verify that when a revert fails, the pool is set up without the revert.
+ """ + mountdir = tempfile.mkdtemp("_stratis_mnt") + + with ServiceContextManager(): + device_tokens = self._lb_mgr.create_devices(2) + + pool_name = random_string(5) + + (_, (pool_object_path, _)) = create_pool( + pool_name, self._lb_mgr.device_files(device_tokens) + ) + + fs_name = "fs1" + fs_size = Range(1024**3) + ((_, fs_object_paths), return_code, message) = ( + Pool.Methods.CreateFilesystems( + get_object(pool_object_path), + {"specs": [(fs_name, (True, str(fs_size.magnitude)), (False, ""))]}, + ) + ) + + if return_code != 0: + raise RuntimeError( + f"Failed to create a requested filesystem: {message}" + ) + + settle() + + filepath = f"/dev/stratis/{pool_name}/{fs_name}" + subprocess.check_call(["mount", filepath, mountdir]) + + file1 = "file1.txt" + write_file(mountdir, file1) + + snap_name = "snap1" + ((_, snap_object_path), return_code, message) = ( + Pool.Methods.SnapshotFilesystem( + get_object(pool_object_path), + {"origin": fs_object_paths[0][0], "snapshot_name": snap_name}, + ) + ) + + if return_code != 0: + raise RuntimeError(f"Failed to create requested snapshot: {message}") + + file2 = "file2.txt" + write_file(mountdir, file2) + + Filesystem.Properties.MergeScheduled.Set(get_object(snap_object_path), True) + subprocess.check_call(["umount", mountdir]) + + self.assertTrue(os.path.exists(f"/dev/stratis/{pool_name}/{snap_name}")) + + # Do not stop the pool, but do stop stratisd. Since the devices were + # not torn down, the merge will fail and both filesystems will be set + # up as they were previously. + with ServiceContextManager(): + self.wait_for_pools(1) + + settle() + + subprocess.check_call(["mount", filepath, mountdir]) + + self.read_file(mountdir, file1) + self.read_file(mountdir, file2) + + subprocess.check_call(["umount", mountdir]) + + # Now stop the pool, which should tear down the devices + (_, return_code, message) = Manager.Methods.StopPool( + get_object(TOP_OBJECT), + { + "id": pool_name, + "id_type": "name", + }, + ) + + if return_code != 0: + raise RuntimeError(f"Failed to stop the pool {pool_name}: {message}") + + (_, return_code, message) = Manager.Methods.StartPool( + get_object(TOP_OBJECT), + { + "id": pool_name, + "id_type": "name", + "unlock_method": (False, ""), + "key_fd": (False, 0), + }, + ) + + if return_code != 0: + raise RuntimeError(f"Failed to start the pool {pool_name}: {message}") + + self.wait_for_pools(1) + + settle() + + subprocess.check_call(["mount", filepath, mountdir]) + + self.read_file(mountdir, file1) + + self.assertFalse(os.path.exists(os.path.join(mountdir, file2))) + self.assertFalse(os.path.exists(f"/dev/stratis/{pool_name}/{snap_name}")) + + subprocess.check_call(["umount", mountdir]) + + def test_revert_snapshot_chain(self): # pylint: disable=too-many-locals + """ + Make a chain of snapshots, schedule excess reverts and verify that + those yield an error, and then revert the middle link. + + Verify that the snapshot link now points to the origin. 
+ """ + mountdir = tempfile.mkdtemp("_stratis_mnt") + + with ServiceContextManager(): + device_tokens = self._lb_mgr.create_devices(2) + + pool_name = random_string(5) + + (_, (pool_object_path, _)) = create_pool( + pool_name, self._lb_mgr.device_files(device_tokens) + ) + + fs_name = "fs1" + fs_size = Range(1024**3) + ((_, fs_object_paths), return_code, message) = ( + Pool.Methods.CreateFilesystems( + get_object(pool_object_path), + {"specs": [(fs_name, (True, str(fs_size.magnitude)), (False, ""))]}, + ) + ) + + if return_code != 0: + raise RuntimeError( + f"Failed to create a requested filesystem: {message}" + ) + + settle() + + filepath = f"/dev/stratis/{pool_name}/{fs_name}" + subprocess.check_call(["mount", filepath, mountdir]) + + file1 = "file1.txt" + write_file(mountdir, file1) + + snap_name_1 = "snap1" + ((_, snap_object_path_1), return_code, message) = ( + Pool.Methods.SnapshotFilesystem( + get_object(pool_object_path), + {"origin": fs_object_paths[0][0], "snapshot_name": snap_name_1}, + ) + ) + + if return_code != 0: + raise RuntimeError(f"Failed to create requested snapshot: {message}") + + file2 = "file2.txt" + write_file(mountdir, file2) + + Filesystem.Properties.MergeScheduled.Set( + get_object(snap_object_path_1), True + ) + subprocess.check_call(["umount", mountdir]) + + self.assertTrue(os.path.exists(f"/dev/stratis/{pool_name}/{snap_name_1}")) + + snap_name_2 = "snap2" + ((_, snap_object_path_2), return_code, message) = ( + Pool.Methods.SnapshotFilesystem( + get_object(pool_object_path), + {"origin": snap_object_path_1, "snapshot_name": snap_name_2}, + ) + ) + + if return_code != 0: + raise RuntimeError(f"Failed to create requested snapshot: {message}") + + settle() + + self.assertTrue(os.path.exists(f"/dev/stratis/{pool_name}/{snap_name_2}")) + with self.assertRaises(DPClientInvocationError): + Filesystem.Properties.MergeScheduled.Set( + get_object(snap_object_path_2), True + ) + + # Now stop the pool, which should tear down the devices + (_, return_code, message) = Manager.Methods.StopPool( + get_object(TOP_OBJECT), + { + "id": pool_name, + "id_type": "name", + }, + ) + + if return_code != 0: + raise RuntimeError(f"Failed to stop the pool {pool_name}: {message}") + + (_, return_code, message) = Manager.Methods.StartPool( + get_object(TOP_OBJECT), + { + "id": pool_name, + "id_type": "name", + "unlock_method": (False, ""), + "key_fd": (False, 0), + }, + ) + + if return_code != 0: + raise RuntimeError(f"Failed to start the pool {pool_name}: {message}") + + self.wait_for_pools(1) + + settle() + + subprocess.check_call(["mount", filepath, mountdir]) + + self.read_file(mountdir, file1) + + self.assertFalse(os.path.exists(os.path.join(mountdir, file2))) + self.assertFalse(os.path.exists(f"/dev/stratis/{pool_name}/{snap_name_1}")) + self.assertTrue(os.path.exists(f"/dev/stratis/{pool_name}/{snap_name_2}")) + + subprocess.check_call(["umount", mountdir])