From 58fd152377fce7ef8fbaaa41b8ef255128dca3a5 Mon Sep 17 00:00:00 2001
From: Ronan Dunklau
Date: Tue, 26 Nov 2024 14:11:45 +0100
Subject: [PATCH] Add more retries when restoring a basebackup

Commit 4869d8491cb6df9ced1613af1be5a3fd3dfbc358 added logic to make the
number of retries dependent on the backup size.

Instead of allowing for one error every 64GiB, allow for one every 10GiB.
It's better to retry more than to start from scratch when things go wrong.
---
 pghoard/restore.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pghoard/restore.py b/pghoard/restore.py
index 653a04bb..5f5e8b78 100644
--- a/pghoard/restore.py
+++ b/pghoard/restore.py
@@ -607,8 +607,8 @@ def _get_basebackup(
         os.chmod(dirname, 0o700)

         # Based on limited samples, there could be one stalled download per 122GiB of transfer
-        # So we tolerate one stall for every 64GiB of transfer (or STALL_MIN_RETRIES for smaller backup)
-        stall_max_retries = max(STALL_MIN_RETRIES, int(int(metadata.get("total-size-enc", 0)) / (64 * 2 ** 30)))
+        # So we tolerate one stall for every 10GiB of transfer (or STALL_MIN_RETRIES for smaller backup)
+        stall_max_retries = max(STALL_MIN_RETRIES, int(int(metadata.get("total-size-enc", 0)) / (10 * 2 ** 30)))

         fetcher = BasebackupFetcher(
             app_config=self.config,