Skip to content

Commit

Permalink
[WIP] Add support for /home reuse to automatic partitioning
Browse files Browse the repository at this point in the history
We use destroy only for plain partition mount points, because we can't destroy
an lv or a btrfs subvolume - destroying it would not free the space for the new
one (the space stays in its vg or btrfs volume). For lvs and btrfs subvolumes
we therefore reuse the reformat logic from mount point assignment (a real erase
would perhaps be better, even for mount point assignment, since recreating the
device complicates the logic).

TODO:
+ unify biosboot partition with other stuff (configurable)
+ look at all roots
+ test with plain
+ test with lvm
+ test with thin lvm
+ --remove should fail silently?
+ kickstart support (--erase --reuse --remove)
- kickstart tests
- reuse fstab mount options (/home is missing compression)
+ add checks that the device found is unique (including biosboot/efi)
- checks for recreate only on plain partition?
- create check for usability of the /home reuse (for UI)
- update unit tests
- luks ?
- efi partitions, we have only biosboot so far
- update for device_id: #5435
- add better mount point assignment kickstart tests, since we update the code
there (fedora, lvm, plain)
  • Loading branch information
rvykydal committed Aug 9, 2024
1 parent 9a2a254 commit ef788a0
Show file tree
Hide file tree
Showing 7 changed files with 309 additions and 101 deletions.
2 changes: 1 addition & 1 deletion pyanaconda/core/kickstart/commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@

# Supported kickstart commands.
from pykickstart.commands.authselect import F28_Authselect as Authselect
from pykickstart.commands.autopart import F38_AutoPart as AutoPart
from pykickstart.commands.autopart import F41_AutoPart as AutoPart
from pykickstart.commands.bootloader import F39_Bootloader as Bootloader
from pykickstart.commands.btrfs import F23_BTRFS as BTRFS
from pykickstart.commands.cdrom import FC3_Cdrom as Cdrom
Expand Down
53 changes: 53 additions & 0 deletions pyanaconda/modules/common/structures/partitioning.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ def __init__(self):
self._partitioning_scheme = conf.storage.default_scheme
self._file_system_type = ""
self._excluded_mount_points = []
self._erased_mount_points = []
self._reused_mount_points = []
self._removed_mount_points = []
self._hibernation = False

self._encrypted = False
Expand Down Expand Up @@ -111,6 +114,56 @@ def hibernation(self, value: Bool):
def excluded_mount_points(self, mount_points: List[Str]):
self._excluded_mount_points = mount_points

@property
def erased_mount_points(self) -> List[Str]:
    """Erased mount points.

    The existing devices backing these mount points are erased
    and then reused.

    For example: /

    :return: a list of mount points
    """
    return self._erased_mount_points

@erased_mount_points.setter
def erased_mount_points(self, points: List[Str]):
    self._erased_mount_points = points

@property
def reused_mount_points(self) -> List[Str]:
    """Reused mount points.

    The existing devices backing these mount points are reused
    as they are.

    For example: /home

    :return: a list of mount points
    """
    return self._reused_mount_points

@reused_mount_points.setter
def reused_mount_points(self, points: List[Str]):
    self._reused_mount_points = points

@property
def removed_mount_points(self) -> List[Str]:
    """Removed mount points.

    The devices backing these mount points are destroyed if they
    exist. Supported only for plain partition mount points.

    For example: /boot

    :return: a list of mount points
    """
    return self._removed_mount_points

@removed_mount_points.setter
def removed_mount_points(self, points: List[Str]):
    self._removed_mount_points = points

@property
def encrypted(self) -> Bool:
"""Should devices be encrypted?
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,10 @@ def process_kickstart(self, data):
if data.autopart.noswap:
request.excluded_mount_points.append("swap")

request.erased_mount_points = data.autopart.erase
request.reused_mount_points = data.autopart.reuse
request.removed_mount_points = data.autopart.remove

request.hibernation = data.autopart.hibernation

if data.autopart.encrypted:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,22 @@
#
from blivet.partitioning import do_partitioning, grow_lvm
from blivet.static_data import luks_data
from blivet.errors import StorageError

from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core.i18n import _
from pyanaconda.modules.common.structures.partitioning import PartitioningRequest
from pyanaconda.modules.storage.partitioning.automatic.noninteractive_partitioning import \
NonInteractivePartitioningTask
from pyanaconda.modules.storage.partitioning.manual.utils import \
reformat_device
from pyanaconda.modules.storage.partitioning.interactive.utils import destroy_device
from pyanaconda.modules.storage.partitioning.automatic.utils import get_candidate_disks, \
schedule_implicit_partitions, schedule_volumes, schedule_partitions, get_pbkdf_args, \
get_default_partitioning, get_part_spec, get_disks_for_implicit_partitions
from pyanaconda.core.storage import suggest_swap_size


log = get_module_logger(__name__)

__all__ = ["AutomaticPartitioningTask"]
Expand Down Expand Up @@ -56,6 +62,86 @@ def _get_initialization_config(self):
config.clear_non_existent = True
return config

def _get_mountpoint_device(self, storage, mountpoint, required=True):
devices = []
# TODO add support for EFI
if mountpoint == "biosboot":
for device in storage.devices:
if device.format.type == "biosboot":
devices.append(device)
else:
for root in storage.roots:
if mountpoint in root.mounts:
devices.append(root.mounts[mountpoint])
if len(devices) > 1:
raise StorageError(_("Multiple devices found for mountpoint '{}': {}")
.format(mountpoint,
", ".join([device.name for device in devices])))
if not devices:
if required:
raise StorageError(_("No devices found for mountpoint '{}'").format(mountpoint))
else:
return None

return devices[0]

def _reused_devices_mountpoints(self, request):
return request.reused_mount_points + request.erased_mount_points

def _get_reused_device_names(self, storage):
reused_devices = {}
for mountpoint in self._reused_devices_mountpoints(self._request):
device = self._get_mountpoint_device(storage, mountpoint)
reused_devices[device.name] = mountpoint
return reused_devices

def _erase_mountpoint(self, storage, mountpoint):
    """Erase the device backing the given mount point.

    The device is scheduled for reformat instead of being destroyed,
    so it stays in place in its vg or btrfs volume.

    :param storage: an instance of Blivet
    :param mountpoint: a mount point whose device should be erased
    """
    device = self._get_mountpoint_device(storage, mountpoint)
    # Fixed: dropped the "DDDDD" WIP debug marker from the log message.
    log.debug("Erasing device %s for mountpoint %s.", device, mountpoint)

    reused_devices = self._get_reused_device_names(storage)
    # FIXME it would be better to really erase the device
    reformat_device(storage, device, dependencies=reused_devices)

def _remove_mountpoint(self, storage, mountpoint):
    """Destroy the device backing the given mount point, if any.

    Missing devices are skipped silently. Supported only for plain
    partition mount points.

    :param storage: an instance of Blivet
    :param mountpoint: a mount point whose device should be removed
    """
    device = self._get_mountpoint_device(storage, mountpoint, required=False)

    if device:
        # Fixed: dropped the "DDDDD" WIP debug markers from the log messages.
        log.debug("Removing device %s for mountpoint %s.", device, mountpoint)
        destroy_device(storage, device)
    else:
        log.debug("Device to be removed for mountpoint %s not found.", mountpoint)

def _clear_partitions(self, storage):
    """Clear partitions and process removed and erased mount points.

    After the inherited clearing, destroy the devices of the removed
    mount points and schedule a reformat of the erased ones.

    :param storage: an instance of Blivet
    """
    super()._clear_partitions(storage)

    # Fixed: dropped the "DDDDD" WIP debug marker from the log message.
    log.debug("Detected roots and their mounts: %s",
              [root.mounts for root in storage.roots])

    # TODO check that partitioning scheme matches - do it earlier in the
    # check but also here?

    # TODO maybe move to _configure_partitioning? Seems safer here.
    for mountpoint in self._request.removed_mount_points:
        self._remove_mountpoint(storage, mountpoint)

    for mountpoint in self._request.erased_mount_points:
        self._erase_mountpoint(storage, mountpoint)

def _schedule_reused_mountpoint(self, storage, mountpoint):
    """Schedule mounting of a reused device at the given mount point.

    :param storage: an instance of Blivet
    :param mountpoint: a mount point backed by a reused device
    """
    device = self._get_mountpoint_device(storage, mountpoint)
    # Fixed: dropped the "DDDDD" WIP debug marker from the log message.
    log.debug("Adding mount request for reused mountpoint %s to device %s.",
              mountpoint, device)
    device.format.mountpoint = mountpoint

def _schedule_erased_mountpoint(self, storage, mountpoint):
    """Schedule mounting of an erased device at the given mount point.

    :param storage: an instance of Blivet
    :param mountpoint: a mount point backed by an erased device
    """
    old_device = self._get_mountpoint_device(storage, mountpoint)

    # FIXME: use rather erase for btrfs reformat (no device recreation)
    # The device might have been recreated (btrfs), so resolve it by name.
    device = storage.devicetree.resolve_device(old_device.name)

    if device:
        # Fixed: dropped the "DDDDD" WIP debug markers from the log messages.
        log.debug("Adding mount request for erased mountpoint %s to device %s.",
                  mountpoint, device)
        device.format.mountpoint = mountpoint
    else:
        log.debug("Device for erased mountpoint %s not found.", mountpoint)

def _configure_partitioning(self, storage):
"""Configure the partitioning.
Expand Down Expand Up @@ -88,7 +174,15 @@ def _configure_partitioning(self, storage):
requests = self._get_partitioning(storage, scheme, self._request)

# Do the autopart.
self._do_autopart(storage, scheme, requests, encrypted, luks_format_args)
create_implicit_partitions = not self._implicit_partitions_reused(storage, self._request)
self._do_autopart(storage, scheme, requests, encrypted, luks_format_args,
create_implicit_partitions)

for mountpoint in self._request.reused_mount_points:
self._schedule_reused_mountpoint(storage, mountpoint)
for mountpoint in self._request.erased_mount_points:
self._schedule_erased_mountpoint(storage, mountpoint)


@staticmethod
def _get_luks_format_args(storage, request):
Expand Down Expand Up @@ -139,8 +233,11 @@ def _get_partitioning(storage, scheme, request: PartitioningRequest):
if spec.schemes and scheme not in spec.schemes:
continue

# Skip excluded mount points.
if (spec.mountpoint or spec.fstype) in request.excluded_mount_points:
# Skip excluded or reused mount points.
skipped = request.excluded_mount_points
skipped.extend(request.reused_mount_points)
skipped.extend(request.erased_mount_points)
if (spec.mountpoint or spec.fstype) in skipped:
continue

# Detect swap.
Expand Down Expand Up @@ -168,8 +265,20 @@ def _get_partitioning(storage, scheme, request: PartitioningRequest):

return specs

def _implicit_partitions_reused(self, storage, request):
for mountpoint in self._reused_devices_mountpoints(request):
device = self._get_mountpoint_device(storage, mountpoint)
if hasattr(device, "vg"):
log.debug("DDDDD reusing volume group %s", device.vg)
return True
if hasattr(device, "volume"):
log.debug("DDDDD reusing volume %s", device.volume)
return True
return False

@staticmethod
def _do_autopart(storage, scheme, requests, encrypted=False, luks_fmt_args=None):
def _do_autopart(storage, scheme, requests, encrypted=False, luks_fmt_args=None,
create_implicit_partitions=True):
"""Perform automatic partitioning.
:param storage: an instance of Blivet
Expand All @@ -191,8 +300,10 @@ def _do_autopart(storage, scheme, requests, encrypted=False, luks_fmt_args=None)
log.debug("candidate disks: %s", [d.name for d in disks])

# Schedule implicit partitions.
extra_disks = get_disks_for_implicit_partitions(disks, scheme, requests)
devs = schedule_implicit_partitions(storage, extra_disks, scheme, encrypted, luks_fmt_args)
devs = []
if create_implicit_partitions:
extra_disks = get_disks_for_implicit_partitions(disks, scheme, requests)
devs = schedule_implicit_partitions(storage, extra_disks, scheme, encrypted, luks_fmt_args)

# Schedule requested partitions.
devs = schedule_partitions(storage, disks, devs, scheme, requests, encrypted, luks_fmt_args)
Expand Down
3 changes: 2 additions & 1 deletion pyanaconda/modules/storage/partitioning/automatic/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,8 @@ def schedule_partitions(storage, disks, implicit_devices, scheme, requests, encr
parents=dev)
storage.create_device(luks_dev)

if scheme in (AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP, AUTOPART_TYPE_BTRFS):
if scheme in (AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP, AUTOPART_TYPE_BTRFS) and \
implicit_devices:
# doing LVM/BTRFS -- make sure the newly created partition fits in some
# free space together with one of the implicitly requested partitions
smallest_implicit = sorted(implicit_devices, key=lambda d: d.size)[0]
Expand Down
101 changes: 8 additions & 93 deletions pyanaconda/modules/storage/partitioning/manual/manual_partitioning.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from pyanaconda.core.i18n import _
from pyanaconda.modules.storage.partitioning.automatic.noninteractive_partitioning import \
NonInteractivePartitioningTask
from pyanaconda.modules.storage.partitioning.manual.utils import reformat_device

log = get_module_logger(__name__)

Expand Down Expand Up @@ -77,52 +78,13 @@ def _setup_mount_point(self, storage, mount_data):
)

if reformat:
if format_type:
fmt = get_format(format_type)

if not fmt:
raise StorageError(
_("Unknown or invalid format '{}' specified for "
"device '{}'").format(format_type, device.name)
)
else:
old_fmt = device.format

if not old_fmt or old_fmt.type is None:
raise StorageError(_("No format on device '{}'").format(device.name))

fmt = get_format(old_fmt.type)

if device.raw_device.type in ("btrfs volume", "btrfs subvolume"):
# 'Format', or rather clear the device by recreating it

# recreating @device will remove all nested subvolumes of it, we cannot allow
# using these nested subvolumes for other MountPointRequest without also
# re-creating them
if device.raw_device.type == "btrfs volume":
dep_subvolumes = device.raw_device.subvolumes
elif device.raw_device.type == "btrfs subvolume":
dep_subvolumes = [sub.device_id for sub in device.raw_device.volume.subvolumes
if sub.depends_on(device.raw_device)]
problem_subvolumes = [req for req in self._requests if (req.mount_point
and not req.reformat
and req.device_spec in
dep_subvolumes)]
if problem_subvolumes:
err = (_("{} mounted as {}").format(dep.device_spec,
dep.mount_point) for dep in problem_subvolumes)
raise StorageError(
_("Reformatting the '{}' subvolume will remove the following nested "
"subvolumes which cannot be reused: {}").format(device.raw_device.name,
", ".join(err)))
device = self._recreate_btrfs_device(storage, device_spec)
mount_data.mount_options = device.format.options
else:
storage.format_device(device, fmt)

# make sure swaps end up in /etc/fstab
if fmt.type == "swap":
storage.add_fstab_swap(device)
requested_devices = dict(((req.device_spec, req.mount_point) for req in self._requests))
device, mount_options = reformat_device(storage,
device,
format_type,
dependencies=requested_devices)
if mount_options is not None:
mount_data.mount_options = mount_options

# add "mounted" swaps to fstab
if device.format.type == "swap" and mount_point == "swap":
Expand All @@ -134,50 +96,3 @@ def _setup_mount_point(self, storage, mount_data):

device.format.create_options = mount_data.format_options
device.format.options = mount_data.mount_options

def _recreate_btrfs_volume(self, storage, device):
    """Recreate a btrfs volume device by destroying and adding it.

    :param storage: an instance of the Blivet's storage object
    :param device: a BtrfsVolumeDevice to recreate
    :raise: StorageError if the volume still has subvolumes
    :return: the newly scheduled btrfs volume device
    """
    if device.children:
        raise StorageError(
            _("Cannot reformat Btrfs volume '{}' with "
              "existing subvolumes").format(device.name))

    storage.destroy_device(device)

    # Put a fresh btrfs format on every parent of the old volume.
    for parent in device.parents:
        storage.format_device(parent, get_format("btrfs"))

    volume = storage.new_btrfs(parents=device.parents[:],
                               name=device.name)
    storage.create_device(volume)
    return volume

def _recreate_btrfs_subvolume(self, storage, device):
"""Recreate a btrfs subvolume device by destroying and adding it.
:param storage: an instance of the Blivet's storage object
:param device: a BtrfsSubVolumeDevice to recreate
"""
storage.recursive_remove(device)
new_btrfs = storage.new_btrfs(parents=device.parents[:],
name=device.name,
subvol=True)
storage.create_device(new_btrfs)
return new_btrfs

def _recreate_btrfs_device(self, storage, dev_spec):
"""Recreate a device by destroying and adding it.
:param storage: an instance of the Blivet's storage object
:param dev_spec: a string describing a block device to be recreated
"""
device = storage.devicetree.get_device_by_device_id(dev_spec)

if device.type == "btrfs volume":
# can't use device factory for just the volume
return self._recreate_btrfs_volume(storage, device)
elif device.type == "btrfs subvolume":
# using the factory for subvolumes in some cases removes
# the volume too, we don't want that
return self._recreate_btrfs_subvolume(storage, device)
Loading

0 comments on commit ef788a0

Please sign in to comment.