From 804c4431fa70611035f88911f64a6e2206e7f368 Mon Sep 17 00:00:00 2001
From: Marius Vollmer
Date: Thu, 15 Aug 2024 14:38:59 +0300
Subject: [PATCH] WIP - poll with python

This reduces round trips and allows us to maintain "temporary" mount
points for btrfs filesystems that are not otherwise mounted.

The process will be shut down when the cockpit session is closed by
cockpit-ws, so we get reliable cleanup.
---
 pkg/storaged/btrfs/btrfs-tool.py        | 198 ++++++++++++++++++++++++
 pkg/storaged/client.js                  | 177 ++++++++-------------
 pkg/storaged/filesystem/mismounting.jsx |   4 +
 pkg/storaged/utils.js                   |   3 +
 test/verify/check-storage-btrfs         |  14 +-
 5 files changed, 284 insertions(+), 112 deletions(-)
 create mode 100755 pkg/storaged/btrfs/btrfs-tool.py

diff --git a/pkg/storaged/btrfs/btrfs-tool.py b/pkg/storaged/btrfs/btrfs-tool.py
new file mode 100755
index 000000000000..6f78a810ed3e
--- /dev/null
+++ b/pkg/storaged/btrfs/btrfs-tool.py
@@ -0,0 +1,198 @@
+#! /usr/bin/python3

+# btrfs-tool -- Query and monitor btrfs filesystems
+#
+# This program monitors all btrfs filesystems and reports their
+# subvolumes and other things.
+#
+# It can do that continuously, or as a one-shot operation. The tool
+# mounts btrfs filesystems as necessary to retrieve the requested
+# information, but does it in a polite way: they are mounted once and
+# then left mounted until that is no longer needed. Typically, you
+# might see some mounts when a Cockpit session starts, and the
+# corresponding unmounts when it ends.
+#
+# This tool can be run multiple times concurrently, and it won't get
+# confused.
+
+import contextlib
+import fcntl
+import json
+import os
+import re
+import signal
+import subprocess
+import sys
+import time
+
+TMP_MP_DIR = "/var/lib/cockpit/btrfs"
+
+def ensure_tmp_mp_dir():
+    os.makedirs(TMP_MP_DIR, mode=0o700, exist_ok=True)
+
+@contextlib.contextmanager
+def atomic_file(path):
+    fd = os.open(path, os.O_RDWR | os.O_CREAT)
+    fcntl.flock(fd, fcntl.LOCK_EX)
+    data = os.read(fd, 100000)
+    blob = json.loads(data) if len(data) > 0 else { }
+    try:
+        yield blob
+        data = json.dumps(blob).encode() + b"\n"
+        os.lseek(fd, 0, os.SEEK_SET)
+        os.truncate(fd, 0)
+        os.write(fd, data)
+    finally:
+        os.close(fd)
+
+def list_filesystems():
+    output = json.loads(subprocess.check_output(["lsblk", "-Jplno", "NAME,FSTYPE,UUID,MOUNTPOINTS"]))
+    filesystems = {}
+    for b in output['blockdevices']:
+        if b['fstype'] == "btrfs":
+            uuid = b['uuid']
+            mps = list(filter(lambda x: x is not None and not x.startswith(TMP_MP_DIR), b['mountpoints']))
+            if uuid not in filesystems:
+                filesystems[uuid] = { 'uuid': uuid, 'devices': [ b['name'] ], 'mountpoints': mps }
+            else:
+                filesystems[uuid]['devices'] += [ b['name'] ]
+                filesystems[uuid]['mountpoints'] += mps
+    return filesystems
+
+tmp_mountpoints = set()
+
+def add_tmp_mountpoint(uuid, dev):
+    global tmp_mountpoints
+    if uuid not in tmp_mountpoints:
+        sys.stderr.write(f"ADDING {uuid}\n")
+        tmp_mountpoints.add(uuid)
+        ensure_tmp_mp_dir()
+        with atomic_file(TMP_MP_DIR + "/db") as db:
+            if uuid in db and db[uuid] > 0:
+                db[uuid] += 1
+            else:
+                db[uuid] = 1
+                dir = TMP_MP_DIR + "/" + uuid
+                sys.stderr.write(f"MOUNTING {dir}\n")
+                os.makedirs(dir, exist_ok=True)
+                subprocess.check_call(["mount", dev, dir])
+
+def remove_tmp_mountpoint(uuid):
+    global tmp_mountpoints
+    if uuid in tmp_mountpoints:
+        sys.stderr.write(f"REMOVING {uuid}\n")
+        tmp_mountpoints.remove(uuid)
+        ensure_tmp_mp_dir()
+        with atomic_file(TMP_MP_DIR + "/db") as db:
+            if db[uuid] == 1:
+                dir = TMP_MP_DIR + "/" + uuid
+                try:
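+                    # Last reference is gone: unmount and remove the
+                    # temporary mount point directory.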
+ sys.stderr.write(f"UNMOUNTING {dir}\n") + subprocess.check_call(["umount", dir]) + subprocess.check_call(["rmdir", dir]) + except: + # XXX - log error, try harder? + pass + del db[uuid] + else: + db[uuid] -= 1 + +def remove_all_tmp_mountpoints(): + for mp in set(tmp_mountpoints): + remove_tmp_mountpoint(mp) + +def ensure_mount_point(fs): + if len(fs['mountpoints']) > 0: + remove_tmp_mountpoint(fs['uuid']) + return fs['mountpoints'][0] + else: + add_tmp_mountpoint(fs['uuid'], fs['devices'][0]) + return TMP_MP_DIR + "/" + fs['uuid'] + +def get_subvolume_info(mp): + lines = subprocess.check_output(["btrfs", "subvolume", "list", "-apuq", mp]).splitlines() + subvols = [] + for line in lines: + match = re.match(b"ID (\\d+).*parent (\\d+).*parent_uuid (.*)uuid (.*) path (/)?(.*)", line); + if match: + subvols += [ + { + 'pathname': match[6].decode(errors='replace'), + 'id': int(match[1]), + 'parent': int(match[2]), + 'uuid': match[4].decode(), + 'parent_uuid': None if match[3][0] == ord("-") else match[3].decode().strip() + } + ] + return subvols + +def get_default_subvolume(mp): + output = subprocess.check_output(["btrfs", "subvolume", "get-default", mp]) + match = re.match(b"ID (\\d+).*", output); + if match: + return int(match[1]); + else: + return None + +def get_usages(uuid): + output = subprocess.check_output(["btrfs", "filesystem", "show", "--raw", uuid]) + usages = {} + for line in output.splitlines(): + match = re.match(b".*used\\s+(\\d+)\\s+path\\s+([\\w/]+).*", line) + if match: + usages[match[2].decode()] = int(match[1]); + return usages; + +def poll(): + sys.stderr.write("POLL\n") + filesystems = list_filesystems() + info = { } + for fs in filesystems.values(): + mp = ensure_mount_point(fs) + if mp: + try: + info[fs['uuid']] = { + 'subvolumes': get_subvolume_info(mp), + 'default_subvolume': get_default_subvolume(mp), + 'usages': get_usages(fs['uuid']), + } + except: + # XXX - export error message? 
+                pass
+    return info
+
+def cmd_monitor():
+    old_infos = poll()
+    sys.stdout.write(json.dumps(old_infos) + "\n")
+    sys.stdout.flush()
+    while True:
+        time.sleep(5.0)
+        new_infos = poll()
+        if new_infos != old_infos:
+            sys.stdout.write(json.dumps(new_infos) + "\n")
+            sys.stdout.flush()
+            old_infos = new_infos
+
+def cmd_poll():
+    infos = poll()
+    sys.stdout.write(json.dumps(infos) + "\n")
+    sys.stdout.flush()
+
+def cmd(args):
+    if len(args) > 1:
+        if args[1] == "poll":
+            cmd_poll()
+        elif args[1] == "monitor":
+            cmd_monitor()
+
+def main(args):
+    signal.signal(signal.SIGTERM, lambda _signo, _stack: sys.exit(0))
+    try:
+        cmd(args)
+    except Exception as err:
+        sys.stderr.write(str(err) + "\n")
+        sys.exit(1)
+    finally:
+        remove_all_tmp_mountpoints()
+
+main(sys.argv)
diff --git a/pkg/storaged/client.js b/pkg/storaged/client.js
index a75749d42b73..75627ba286b9 100644
--- a/pkg/storaged/client.js
+++ b/pkg/storaged/client.js
@@ -39,6 +39,8 @@ import { export_mount_point_mapping } from "./anaconda.jsx";
 
 import { dequal } from 'dequal/lite';
 
+import btrfs_tool_py from "./btrfs/btrfs-tool.py";
+
 /* STORAGED CLIENT */
 
@@ -200,114 +202,6 @@ client.swap_sizes = instance_sampler([{ name: "swapdev.length" },
     { name: "swapdev.free" },
 ], "direct");
 
-export async function btrfs_poll() {
-    const usage_regex = /used\s+(?<used>\d+)\s+path\s+(?<device>[\w/]+)/;
-    if (!client.uuids_btrfs_subvols)
-        client.uuids_btrfs_subvols = { };
-    if (!client.uuids_btrfs_usage)
-        client.uuids_btrfs_usage = { };
-    if (!client.uuids_btrfs_default_subvol)
-        client.uuids_btrfs_default_subvol = { };
-    if (!client.uuids_btrfs_volume)
-        return;
-
-    if (!client.superuser.allowed || !client.features.btrfs) {
-        return;
-    }
-
-    const uuids_subvols = { };
-    const uuids_usage = { };
-    const btrfs_default_subvol = { };
-    for (const uuid of Object.keys(client.uuids_btrfs_volume)) {
-        const blocks = client.uuids_btrfs_blocks[uuid];
-        if (!blocks)
-            continue;
-
-        // In multi device setups MountPoints can be on either of the block devices, so try them all.
-        const MountPoints = blocks.map(block => {
-            return client.blocks_fsys[block.path];
-        }).map(block_fsys => block_fsys.MountPoints).reduce((accum, current) => accum.concat(current));
-        const mp = MountPoints[0];
-        if (mp) {
-            const mount_point = utils.decode_filename(mp);
-            try {
-                // HACK: UDisks GetSubvolumes method uses `subvolume list -p` which
-                // does not show the full subvolume path which we want to show in the UI
-                //
-                // $ btrfs subvolume list -p /run/butter
-                // ID 256 gen 7 parent 5 top level 5 path one
-                // ID 257 gen 7 parent 256 top level 256 path two
-                // ID 258 gen 7 parent 257 top level 257 path two/three/four
-                //
-                // $ btrfs subvolume list -ap /run/butter
-                // ID 256 gen 7 parent 5 top level 5 path <FS_TREE>/one
-                // ID 257 gen 7 parent 256 top level 256 path <FS_TREE>/one/two
-                // ID 258 gen 7 parent 257 top level 257 path <FS_TREE>/one/two/three/four
-                const output = await cockpit.spawn(["btrfs", "subvolume", "list", "-apuq", mount_point], { superuser: "require", err: "message" });
-                const subvols = [{ pathname: "/", id: 5, parent: null }];
-                for (const line of output.split("\n")) {
-                    const m = line.match(/ID (\d+).*parent (\d+).*parent_uuid (.*)uuid (.*) path (\/)?(.*)/);
-                    if (m) {
-                        // The parent uuid is the uuid of which this subvolume is a snapshot.
-                        // https://github.com/torvalds/linux/blob/8d025e2092e29bfd13e56c78e22af25fac83c8ec/include/uapi/linux/btrfs.h#L885
-                        let parent_uuid = m[3].trim();
-                        // BTRFS_UUID_SIZE is 16
-                        parent_uuid = parent_uuid.length < 16 ? null : parent_uuid;
-                        subvols.push({ pathname: m[6], id: Number(m[1]), parent: Number(m[2]), uuid: m[4], parent_uuid });
-                    }
-                }
-                uuids_subvols[uuid] = subvols;
-            } catch (err) {
-                console.warn(`unable to obtain subvolumes for mount point ${mount_point}`, err);
-            }
-
-            // HACK: Obtain the default subvolume, required for mounts in which do not specify a subvol and subvolid.
-            // In the future can be obtained via UDisks, it requires the btrfs partition to be mounted somewhere.
-            // https://github.com/storaged-project/udisks/commit/b6966b7076cd837f9d307eef64beedf01bc863ae
-            try {
-                const output = await cockpit.spawn(["btrfs", "subvolume", "get-default", mount_point], { superuser: "require", err: "message" });
-                const id_match = output.match(/ID (\d+).*/);
-                if (id_match)
-                    btrfs_default_subvol[uuid] = Number(id_match[1]);
-            } catch (err) {
-                console.warn(`unable to obtain default subvolume for mount point ${mount_point}`, err);
-            }
-
-            // HACK: UDisks should expose a better btrfs API with btrfs device information
-            // https://github.com/storaged-project/udisks/issues/1232
-            // TODO: optimise into just parsing one `btrfs filesystem show`?
-            try {
-                const usage_output = await cockpit.spawn(["btrfs", "filesystem", "show", "--raw", uuid], { superuser: "require", err: "message" });
-                const usages = {};
-                for (const line of usage_output.split("\n")) {
-                    const match = usage_regex.exec(line);
-                    if (match) {
-                        const { used, device } = match.groups;
-                        usages[device] = used;
-                    }
-                }
-                uuids_usage[uuid] = usages;
-            } catch (err) {
-                console.warn(`btrfs filesystem show ${uuid}`, err);
-            }
-        } else {
-            uuids_subvols[uuid] = null;
-            uuids_usage[uuid] = null;
-        }
-    }
-
-    if (!dequal(client.uuids_btrfs_subvols, uuids_subvols) || !dequal(client.uuids_btrfs_usage, uuids_usage) ||
-        !dequal(client.uuids_btrfs_default_subvol, btrfs_default_subvol)) {
-        debug("btrfs_pol new subvols:", uuids_subvols);
-        client.uuids_btrfs_subvols = uuids_subvols;
-        client.uuids_btrfs_usage = uuids_usage;
-        debug("btrfs_pol usage:", uuids_usage);
-        client.uuids_btrfs_default_subvol = btrfs_default_subvol;
-        debug("btrfs_pol default subvolumes:", btrfs_default_subvol);
-        client.update();
-    }
-}
-
 function btrfs_findmnt_poll() {
     if (!client.btrfs_mounts)
         client.btrfs_mounts = { };
@@ -391,15 +285,78 @@ function btrfs_findmnt_poll() {
     });
 }
 
+function btrfs_update(data) {
+    if (!client.uuids_btrfs_subvols)
+        client.uuids_btrfs_subvols = { };
+    if (!client.uuids_btrfs_usage)
+        client.uuids_btrfs_usage = { };
+    if (!client.uuids_btrfs_default_subvol)
+        client.uuids_btrfs_default_subvol = { };
+
+    const uuids_subvols = { };
+    const uuids_usage = { };
+    const default_subvol = { };
+
+    for (const uuid in data) {
+        debug("btrfs_update", uuid, data[uuid]);
+        uuids_subvols[uuid] = [{ pathname: "/", id: 5, parent: null }].concat(data[uuid].subvolumes);
+        uuids_usage[uuid] = data[uuid].usages;
+        default_subvol[uuid] = data[uuid].default_subvolume;
+    }
+
+    if (!dequal(client.uuids_btrfs_subvols, uuids_subvols) || !dequal(client.uuids_btrfs_usage, uuids_usage) ||
+        !dequal(client.uuids_btrfs_default_subvol, default_subvol)) {
+        debug("btrfs_update new subvols:", uuids_subvols);
+        client.uuids_btrfs_subvols = uuids_subvols;
+        client.uuids_btrfs_usage = uuids_usage;
+        debug("btrfs_update usage:", uuids_usage);
+        client.uuids_btrfs_default_subvol = default_subvol;
+        debug("btrfs_update default subvolumes:", default_subvol);
+        client.update();
+    }
+}
+
+export async function btrfs_poll() {
+    if (!client.superuser.allowed || !client.features.btrfs) {
+        return;
+    }
+
+    const data = JSON.parse(await python.spawn(btrfs_tool_py, ["poll"], { superuser: "require" }));
+    btrfs_update(data);
+}
+
+function btrfs_start_monitor() {
+    if (!client.superuser.allowed || !client.features.btrfs) {
+        return;
+    }
+
+    const channel = python.spawn(btrfs_tool_py, ["monitor"], { superuser: "require" });
+    let buf = "";
+
+    channel.stream(output => {
+        buf += output;
+        const lines = buf.split("\n");
+        buf = lines[lines.length - 1];
+        if (lines.length >= 2) {
+            const data = JSON.parse(lines[lines.length - 2]);
+            btrfs_update(data);
+        }
+    });
+
+    channel.catch(err => {
+        // XXX - put up an oops, etc.
+        console.warn("BTRFS MONITOR ERROR", err);
+    });
+}
+
 function btrfs_start_polling() {
     debug("starting polling for btrfs subvolumes");
-    window.setInterval(btrfs_poll, 5000);
     client.uuids_btrfs_subvols = { };
     client.uuids_btrfs_usage = { };
     client.uuids_btrfs_default_subvol = { };
     client.btrfs_mounts = { };
-    btrfs_poll();
     btrfs_findmnt_poll();
+    btrfs_start_monitor();
 }
 
 /* Derived indices.
diff --git a/pkg/storaged/filesystem/mismounting.jsx b/pkg/storaged/filesystem/mismounting.jsx
index 4fa4340aeae7..69a36b7c281a 100644
--- a/pkg/storaged/filesystem/mismounting.jsx
+++ b/pkg/storaged/filesystem/mismounting.jsx
@@ -51,6 +51,10 @@ export function check_mismounted_fsys(backing_block, content_block, fstab_config
         if (m == "/")
             return true;
 
+        // This is the mount point used for monitoring btrfs filesystems.
+        if (m.indexOf("/var/lib/cockpit/btrfs/") == 0)
+            return true;
+
         return false;
     }
 
diff --git a/pkg/storaged/utils.js b/pkg/storaged/utils.js
index 6b8a61015c34..e87e8a2f933e 100644
--- a/pkg/storaged/utils.js
+++ b/pkg/storaged/utils.js
@@ -861,6 +861,9 @@ export function get_active_usage(client, path, top_action, child_action, is_temp
     }
 
     function enter_unmount(block, location, is_top) {
+        if (location.indexOf("/var/lib/cockpit/btrfs/") == 0)
+            return;
+
         const [, mount_point] = get_fstab_config_with_client(client, block);
         const has_fstab_entry = is_temporary && location == mount_point;
 
diff --git a/test/verify/check-storage-btrfs b/test/verify/check-storage-btrfs
index bdcff493f1dc..62d5b2400ac5 100755
--- a/test/verify/check-storage-btrfs
+++ b/test/verify/check-storage-btrfs
@@ -537,6 +537,9 @@ class TestStorageBtrfs(storagelib.StorageCase):
         self.click_dropdown(self.card_row("Storage", name="sda") + " + tr", "Mount")
         self.dialog({"mount_point": mount_point})
 
+        # Wait for Cockpit's own mount to go away
+        b.wait(lambda: "/var/lib/cockpit/btrfs" not in m.execute("findmnt"))
+
         m.execute(f"""
         umount {mount_point}
         cryptsetup luksClose /dev/mapper/btrfs-test
@@ -573,6 +576,9 @@ class TestStorageBtrfs(storagelib.StorageCase):
         m = self.machine
         b = self.browser
 
+        # Automount polling is enabled everywhere right now
+        automount_polling = True
+
         disk = self.add_ram_disk(size=128)
         m.execute(f"mkfs.btrfs -L butter {disk}; mount {disk} /mnt; btrfs subvolume create /mnt/home; btrfs subvolume create /mnt/home/backups")
 
@@ -581,8 +587,12 @@ class TestStorageBtrfs(storagelib.StorageCase):
         self.login_and_go("/storage")
         self.click_card_row("Storage", name="sda")
 
-        b.wait_not_present(self.card_row("btrfs filesystem", name="home"))
-        b.wait_not_present(self.card_row("btrfs filesystem", name="backups"))
+        if automount_polling:
+            b.wait_visible(self.card_row("btrfs filesystem", name="home"))
+            b.wait_visible(self.card_row("btrfs filesystem", name="backups"))
+        else:
+            b.wait_not_present(self.card_row("btrfs filesystem", name="home"))
+            b.wait_not_present(self.card_row("btrfs filesystem", name="backups"))
 
         # Add some fstab entries. Cockpit should pick up the
         # subvolumes mentioned in them and show them.
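
Note: as a usage sketch (not part of the patch), the "monitor" stream
can be consumed outside of Cockpit with nothing more than the
one-JSON-document-per-line framing that cmd_monitor() establishes
above. The tool path and the root invocation below are illustrative
assumptions:

    #!/usr/bin/python3
    # Follow "btrfs-tool.py monitor"; each line is one complete JSON
    # snapshot, written only when something changed. Run as root so
    # the tool can create its temporary mount points.
    import json
    import subprocess

    proc = subprocess.Popen(
        ["python3", "pkg/storaged/btrfs/btrfs-tool.py", "monitor"],
        stdout=subprocess.PIPE, text=True)

    for line in proc.stdout:
        info = json.loads(line)
        for uuid, fs in info.items():
            print(uuid, "default:", fs["default_subvolume"],
                  "subvolumes:", len(fs["subvolumes"]))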