From 709d5c92086a1adad678461080f8c2bf2eaba54c Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 21 Apr 2023 11:05:38 +0000 Subject: [PATCH 01/20] DAOS-0000 dfuse: Add a pre-read feature for non-cached files. When the kernel cache is in use but a file is not cached then pre-read the file on open. Signed-off-by: Ashley Pittman # ------------------------ >8 ------------------------ Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- src/client/dfuse/dfuse.h | 47 ++++++++++- src/client/dfuse/ops/open.c | 29 +++++++ src/client/dfuse/ops/read.c | 154 +++++++++++++++++++++++++++++++++++- utils/node_local_test.py | 40 ++++++++++ 4 files changed, 267 insertions(+), 3 deletions(-) diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index a55086c8469..b604afb1752 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -92,12 +92,49 @@ struct dfuse_readdir_entry { off_t dre_next_offset; }; +/* Preread. + * + * If a file is opened when caching is on but the file is not cached and the size small enough + * to fit in a single buffer then start a pre-read of the file on open, then when reads do occur + * they can happen directly from the buffer. For 'linear reads' of a file this means that the + * read can be triggered sooner and performed as one dfs request. Make use of the pre-read + * code to only use this for trivial reads, if a file is not read linearly or it's written to + * then back off to the regular behavior, which will likely use the kernel cache. + * + * This works by creating a new descriptor which is pointed to by the open handle, on open dfuse + * decides if it will use pre-read and if so allocate a new descriptor, add it to the open handle + * and then once it's replied to the open immediately issue a read. The new descriptor includes + * a lock which is locked by open before it replies to the kernel request and unlocked by the dfs + * read callback. 
Read requests then take the lock to ensure the dfs read is complete and reply + * directly with the data in the buffer. + * + * This works up to the buffer size minus one, the pre-read tries to read the expected file size + * plus one so if the file size has changed then dfuse will detect this and back off to regular + * read. + * + * A dfuse_event is hung off this new descriptor and these come from the same pool as regular reads, + * this buffer is kept as long as it's needed but released as soon as possible, either on error or + * when EOF is returned to the kernel. If it's still present on release then it's freed then. + * + * TODO: The current issue with pread is that DFuse keeps a single cache time per inode and this + * is used for both data and metadata. For this feature to work the inode cache needs to be valid + * and the data cache needs to be empty. + */ +struct dfuse_read_ahead { + pthread_mutex_t dra_lock; + struct dfuse_event *dra_ev; + int dra_rc; +}; + /** what is returned as the handle for fuse fuse_file_info on create/open/opendir */ struct dfuse_obj_hdl { /** pointer to dfs_t */ dfs_t *doh_dfs; /** the DFS object handle. Not created for directories. 
*/ dfs_obj_t *doh_obj; + + struct dfuse_read_ahead *doh_readahead; + /** the inode entry for the file */ struct dfuse_inode_entry *doh_ie; @@ -213,7 +250,10 @@ struct dfuse_event { struct dfuse_eq *de_eqt; struct dfuse_obj_hdl *de_oh; off_t de_req_position; /**< The file position requested by fuse */ - size_t de_req_len; + union { + size_t de_req_len; + size_t de_readahead_len; + }; void (*de_complete_cb)(struct dfuse_event *ev); }; @@ -543,7 +583,7 @@ struct fuse_lowlevel_ops dfuse_ops; #define DFUSE_REPLY_ENTRY(inode, req, entry) \ do { \ int __rc; \ - DFUSE_TRA_DEBUG(inode, "Returning entry inode %#lx mode %#o size %zi", \ + DFUSE_TRA_DEBUG(inode, "Returning entry inode %#lx mode %#o size %#zx", \ (entry).attr.st_ino, (entry).attr.st_mode, (entry).attr.st_size); \ if (entry.attr_timeout > 0) { \ (inode)->ie_stat = entry.attr; \ @@ -695,6 +735,9 @@ dfuse_cache_evict(struct dfuse_inode_entry *ie); bool dfuse_cache_get_valid(struct dfuse_inode_entry *ie, double max_age, double *timeout); +void +dfuse_pre_read(struct dfuse_projection_info *fs_handle, struct dfuse_obj_hdl *oh); + int check_for_uns_ep(struct dfuse_projection_info *fs_handle, struct dfuse_inode_entry *ie, char *attr, daos_size_t len); diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index d26c6159ef2..4ba306ad3d0 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -16,6 +16,7 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) struct dfuse_obj_hdl *oh = NULL; struct fuse_file_info fi_out = {0}; int rc; + bool prefetch = false; rlink = d_hash_rec_find(&fs_handle->dpi_iet, &ino, sizeof(ino)); if (!rlink) { @@ -58,6 +59,7 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) fi_out.keep_cache = 1; } else if (dfuse_cache_get_valid(ie, ie->ie_dfs->dfc_data_timeout, NULL)) { fi_out.keep_cache = 1; + prefetch = true; } if (fi_out.keep_cache) @@ -89,9 +91,21 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct 
fuse_file_info *fi) atomic_fetch_add_relaxed(&ie->ie_open_count, 1); + if (prefetch && ie->ie_stat.st_size < (1024 * 1024)) { + D_ALLOC_PTR(oh->doh_readahead); + if (oh->doh_readahead) { + D_MUTEX_INIT(&oh->doh_readahead->dra_lock, 0); + D_MUTEX_LOCK(&oh->doh_readahead->dra_lock); + } + } + d_hash_rec_decref(&fs_handle->dpi_iet, rlink); DFUSE_REPLY_OPEN(oh, req, &fi_out); + if (oh->doh_readahead) { + dfuse_pre_read(fs_handle, oh); + } + return; err: d_hash_rec_decref(&fs_handle->dpi_iet, rlink); @@ -112,6 +126,21 @@ dfuse_cb_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) DFUSE_TRA_DEBUG(oh, "Closing %d %d", oh->doh_caching, oh->doh_keep_cache); + if (oh->doh_readahead) { + struct dfuse_event *ev = oh->doh_readahead->dra_ev; + + /* Grab this lock first to ensure that the read cb has been completed */ + D_MUTEX_LOCK(&oh->doh_readahead->dra_lock); + D_MUTEX_UNLOCK(&oh->doh_readahead->dra_lock); + + D_MUTEX_DESTROY(&oh->doh_readahead->dra_lock); + if (ev) { + daos_event_fini(&ev->de_ev); + d_slab_release(ev->de_eqt->de_read_slab, ev); + } + D_FREE(oh->doh_readahead); + } + /* If the file was read from then set the cache time for future use, however if the * file was written to then evict the cache. 
 * The problem here is that if the file was written to then the contents will be in the diff --git a/src/client/dfuse/ops/read.c b/src/client/dfuse/ops/read.c index 7c9ef4d1701..47f8b96523a 100644 --- a/src/client/dfuse/ops/read.c +++ b/src/client/dfuse/ops/read.c @@ -30,7 +30,7 @@ dfuse_cb_read_complete(struct dfuse_event *ev) if (ev->de_len == 0) { DFUSE_TRA_DEBUG(oh, "%#zx-%#zx requested (EOF)", ev->de_req_position, - ev->de_req_position + ev->de_iov.iov_buf_len - 1); + ev->de_req_position + ev->de_req_len - 1); DFUSE_REPLY_BUFQ(oh, ev->de_req, ev->de_iov.iov_buf, ev->de_len); D_GOTO(release, 0); @@ -51,6 +51,53 @@ dfuse_cb_read_complete(struct dfuse_event *ev) d_slab_release(ev->de_eqt->de_read_slab, ev); } +static bool +dfuse_readahead_reply(fuse_req_t req, size_t len, off_t position, struct dfuse_obj_hdl *oh) +{ + size_t reply_len; + + if (oh->doh_readahead->dra_rc) { + DFUSE_REPLY_ERR_RAW(oh, req, oh->doh_readahead->dra_rc); + return true; + } + + if (!oh->doh_linear_read || oh->doh_readahead->dra_ev == NULL) { + DFUSE_TRA_ERROR(oh, "Readahead disabled"); + return false; + } + + if (oh->doh_linear_read_pos != position) { + DFUSE_TRA_ERROR(oh, "disabling readahead"); + return false; + } + + oh->doh_linear_read_pos = position + len; + if (position + len >= oh->doh_readahead->dra_ev->de_readahead_len) { + oh->doh_linear_read_eof = true; + } + + DFUSE_TRA_DEBUG(oh, "%#zx-%#zx requested", position, position + len - 1); + + /* At this point there is a buffer of known length that contains the data, and a read + * request. + * If the attempted read is bigger than the data then it will be truncated. + * If the attempted read is smaller than the buffer it will be met in full. 
+ */ + + if (position + len < oh->doh_readahead->dra_ev->de_readahead_len) { + reply_len = len; + DFUSE_TRA_DEBUG(oh, "%#zx-%#zx read", position, position + reply_len - 1); + } else { + /* The read will be truncated */ + reply_len = oh->doh_readahead->dra_ev->de_readahead_len - position; + DFUSE_TRA_DEBUG(oh, "%#zx-%#zx read %#zx-%#zx not read (truncated)", position, + position + reply_len - 1, position + reply_len, position + len - 1); + } + + DFUSE_REPLY_BUF(oh, req, oh->doh_readahead->dra_ev->de_iov.iov_buf + position, reply_len); + return true; +} + void dfuse_cb_read(fuse_req_t req, fuse_ino_t ino, size_t len, off_t position, struct fuse_file_info *fi) { @@ -68,10 +115,36 @@ dfuse_cb_read(fuse_req_t req, fuse_ino_t ino, size_t len, off_t position, struct DFUSE_TRA_DEBUG(oh, "Returning EOF early without round trip %#zx", position); oh->doh_linear_read_eof = false; oh->doh_linear_read = false; + + if (oh->doh_readahead) { + ev = oh->doh_readahead->dra_ev; + + D_MUTEX_LOCK(&oh->doh_readahead->dra_lock); + ev = oh->doh_readahead->dra_ev; + oh->doh_readahead->dra_ev = NULL; + D_MUTEX_UNLOCK(&oh->doh_readahead->dra_lock); + + if (ev) { + daos_event_fini(&ev->de_ev); + d_slab_release(ev->de_eqt->de_read_slab, ev); + } + } DFUSE_REPLY_BUFQ(oh, req, NULL, 0); return; } + if (oh->doh_readahead) { + bool replied; + + D_MUTEX_LOCK(&oh->doh_readahead->dra_lock); + replied = dfuse_readahead_reply(req, len, position, oh); + D_MUTEX_UNLOCK(&oh->doh_readahead->dra_lock); + + if (replied) { + return; + } + } + eqt = &fs_handle->dpi_eqt[eqt_idx % fs_handle->dpi_eqt_count]; ev = d_slab_acquire(eqt->de_read_slab); @@ -131,3 +204,82 @@ dfuse_cb_read(fuse_req_t req, fuse_ino_t ino, size_t len, off_t position, struct d_slab_release(eqt->de_read_slab, ev); } } + +static void +dfuse_cb_pre_read_complete(struct dfuse_event *ev) +{ + struct dfuse_obj_hdl *oh = ev->de_oh; + + oh->doh_readahead->dra_rc = ev->de_ev.ev_error; + + if (ev->de_ev.ev_error != 0) { + oh->doh_readahead->dra_rc = 
ev->de_ev.ev_error; + daos_event_fini(&ev->de_ev); + d_slab_release(ev->de_eqt->de_read_slab, ev); + oh->doh_readahead->dra_ev = NULL; + } + + /* If the length is not as expected then the stat size was incorrect so do not use this + * read and fallback into the normal read case. + */ + if (ev->de_len != ev->de_readahead_len) { + daos_event_fini(&ev->de_ev); + d_slab_release(ev->de_eqt->de_read_slab, ev); + oh->doh_readahead->dra_ev = NULL; + } + + D_MUTEX_UNLOCK(&oh->doh_readahead->dra_lock); +} + +void +dfuse_pre_read(struct dfuse_projection_info *fs_handle, struct dfuse_obj_hdl *oh) +{ + struct dfuse_eq *eqt; + int rc; + struct dfuse_event *ev; + uint64_t eqt_idx; + size_t len = oh->doh_ie->ie_stat.st_size; + + eqt_idx = atomic_fetch_add_relaxed(&fs_handle->dpi_eqt_idx, 1); + + eqt = &fs_handle->dpi_eqt[eqt_idx % fs_handle->dpi_eqt_count]; + + ev = d_slab_acquire(eqt->de_read_slab); + if (ev == NULL) + D_GOTO(err, rc = ENOMEM); + + /* Request a read one byte bigger than the expected file size, this way we should see + * a truncated read and through that will be able to detect if the file has been + * modified + */ + ev->de_iov.iov_len = len + 1; + ev->de_req = 0; + ev->de_sgl.sg_nr = 1; + ev->de_oh = oh; + ev->de_readahead_len = len; + ev->de_req_position = 0; + + ev->de_complete_cb = dfuse_cb_pre_read_complete; + oh->doh_readahead->dra_ev = ev; + + rc = dfs_read(oh->doh_dfs, oh->doh_obj, &ev->de_sgl, 0, &ev->de_len, &ev->de_ev); + if (rc != 0) { + D_GOTO(err, rc); + return; + } + + /* Send a message to the async thread to wake it up and poll for events */ + sem_post(&eqt->de_sem); + + /* Now ensure there are more descriptors for the next request */ + d_slab_restock(eqt->de_read_slab); + + return; +err: + oh->doh_readahead->dra_rc = rc; + if (ev) { + daos_event_fini(&ev->de_ev); + d_slab_release(eqt->de_read_slab, ev); + } + D_MUTEX_UNLOCK(&oh->doh_readahead->dra_lock); +} diff --git a/utils/node_local_test.py b/utils/node_local_test.py index 
1f5d0bfd8f1..62e9da899cf 100755 --- a/utils/node_local_test.py +++ b/utils/node_local_test.py @@ -1853,6 +1853,46 @@ def test_read(self): print(data) assert data == 'test' + def test_pre_read(self): + """Test the pre-read code. + + Test reading a file which is previously unknown to fuse with caching on. This should go + into the pre_read code and load the file contents automatically after the open call. + """ + dfuse = DFuse(self.server, self.conf, container=self.container) + dfuse.start(v_hint='pre_read_0') + + with open(join(dfuse.dir, 'file0'), 'w') as fd: + fd.write('test') + + with open(join(dfuse.dir, 'file1'), 'w') as fd: + fd.write('test') + + with open(join(dfuse.dir, 'file2'), 'w') as fd: + fd.write('testing') + + if dfuse.stop(): + self.fatal_errors = True + + dfuse = DFuse(self.server, self.conf, caching=True, container=self.container) + dfuse.start(v_hint='pre_read_1') + + with open(join(dfuse.dir, 'file0'), 'r') as fd: + data0 = fd.read() + + with open(join(dfuse.dir, 'file1'), 'r') as fd: + data1 = fd.read(16) + + with open(join(dfuse.dir, 'file2'), 'r') as fd: + data2 = fd.read(2) + + if dfuse.stop(): + self.fatal_errors = True + print(data0) + assert data0 == 'test' + assert data1 == 'test' + assert data2 == 'te' + def test_two_mounts(self): """Create two mounts, and check that a file created in one can be read from the other""" dfuse0 = DFuse(self.server, From b3af6ba506c24f12fde48f20397c9339831a2854 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 24 Apr 2023 13:16:36 +0000 Subject: [PATCH 02/20] Skip some logging. 
Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/read.c | 4 +--- utils/node_local_test.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/client/dfuse/ops/read.c b/src/client/dfuse/ops/read.c index 47f8b96523a..55c2941541a 100644 --- a/src/client/dfuse/ops/read.c +++ b/src/client/dfuse/ops/read.c @@ -76,8 +76,6 @@ dfuse_readahead_reply(fuse_req_t req, size_t len, off_t position, struct dfuse_o oh->doh_linear_read_eof = true; } - DFUSE_TRA_DEBUG(oh, "%#zx-%#zx requested", position, position + len - 1); - /* At this point there is a buffer of known length that contains the data, and a read * request. * If the attempted read is bigger than the data then it will be truncated. @@ -94,7 +92,7 @@ dfuse_readahead_reply(fuse_req_t req, size_t len, off_t position, struct dfuse_o position + reply_len - 1, position + reply_len, position + len - 1); } - DFUSE_REPLY_BUF(oh, req, oh->doh_readahead->dra_ev->de_iov.iov_buf + position, reply_len); + DFUSE_REPLY_BUFQ(oh, req, oh->doh_readahead->dra_ev->de_iov.iov_buf + position, reply_len); return true; } diff --git a/utils/node_local_test.py b/utils/node_local_test.py index 62e9da899cf..f194fe7a32e 100755 --- a/utils/node_local_test.py +++ b/utils/node_local_test.py @@ -22,6 +22,7 @@ import stat import errno import argparse +import random import threading import functools import traceback @@ -1871,6 +1872,14 @@ def test_pre_read(self): with open(join(dfuse.dir, 'file2'), 'w') as fd: fd.write('testing') + raw_data0 = ''.join(random.choices(['d', 'a', 'o', 's'], k=1024 * 1024)) # nosec + with open(join(dfuse.dir, 'file3'), 'w') as fd: + fd.write(raw_data0) + + raw_data1 = ''.join(random.choices(['d', 'a', 'o', 's'], k=(1024 * 1024) - 1)) # nosec + with open(join(dfuse.dir, 'file4'), 'w') as fd: + fd.write(raw_data1) + if dfuse.stop(): self.fatal_errors = True @@ -1886,12 +1895,22 @@ def test_pre_read(self): with open(join(dfuse.dir, 'file2'), 
'r') as fd: data2 = fd.read(2) + with open(join(dfuse.dir, 'file3'), 'r') as fd: + data3 = fd.read() + + with open(join(dfuse.dir, 'file4'), 'r') as fd: + data4 = fd.read() + data5 = fd.read() + if dfuse.stop(): self.fatal_errors = True print(data0) assert data0 == 'test' assert data1 == 'test' assert data2 == 'te' + assert raw_data0 == data3 + assert raw_data1 == data4 + assert len(data5) == 0 def test_two_mounts(self): """Create two mounts, and check that a file created in one can be read from the other""" From 5134632b76c436141959cc0e8d0e891f8105a39d Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 24 Apr 2023 13:26:33 +0000 Subject: [PATCH 03/20] Fix a potential crash on free. Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/open.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index 4ba306ad3d0..3a048991f29 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -91,7 +91,12 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) atomic_fetch_add_relaxed(&ie->ie_open_count, 1); - if (prefetch && ie->ie_stat.st_size < (1024 * 1024)) { + /* Enable this for smaller files, up to but not including the max read size. This means + * that files of 1MiB or larger will not be pre-read but smaller files will. One extra + * unused byte at the end of the buffer will be used to detect if the file has grown + * since dfuse last observed the size. 
+ */ + if (prefetch && ie->ie_stat.st_size < DFUSE_MAX_READ) { D_ALLOC_PTR(oh->doh_readahead); if (oh->doh_readahead) { D_MUTEX_INIT(&oh->doh_readahead->dra_lock, 0); @@ -102,9 +107,8 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) d_hash_rec_decref(&fs_handle->dpi_iet, rlink); DFUSE_REPLY_OPEN(oh, req, &fi_out); - if (oh->doh_readahead) { + if (oh->doh_readahead) dfuse_pre_read(fs_handle, oh); - } return; err: @@ -127,12 +131,17 @@ dfuse_cb_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) DFUSE_TRA_DEBUG(oh, "Closing %d %d", oh->doh_caching, oh->doh_keep_cache); if (oh->doh_readahead) { - struct dfuse_event *ev = oh->doh_readahead->dra_ev; + struct dfuse_event *ev; - /* Grab this lock first to ensure that the read cb has been completed */ + /* Grab this lock first to ensure that the read cb has been completed. The + * callback might register an error and release ev so do not read its value + * until after this has completed. + */ D_MUTEX_LOCK(&oh->doh_readahead->dra_lock); D_MUTEX_UNLOCK(&oh->doh_readahead->dra_lock); + ev = oh->doh_readahead->dra_ev; + D_MUTEX_DESTROY(&oh->doh_readahead->dra_lock); if (ev) { daos_event_fini(&ev->de_ev); From 6e8f25884f6c96e33c5dd0f5c9b223973e08e707 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 24 Apr 2023 13:55:03 +0000 Subject: [PATCH 04/20] Add some comments on testing. Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/open.c | 2 +- utils/node_local_test.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index 3a048991f29..db91d88c3c5 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -96,7 +96,7 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) * unused byte at the end of the buffer will be used to detect if the file has grown * since dfuse last observed the size. 
*/ - if (prefetch && ie->ie_stat.st_size < DFUSE_MAX_READ) { + if (prefetch && ie->ie_stat.st_size > 0 && ie->ie_stat.st_size < DFUSE_MAX_READ) { D_ALLOC_PTR(oh->doh_readahead); if (oh->doh_readahead) { D_MUTEX_INIT(&oh->doh_readahead->dra_lock, 0); diff --git a/utils/node_local_test.py b/utils/node_local_test.py index f194fe7a32e..bc59da08f16 100755 --- a/utils/node_local_test.py +++ b/utils/node_local_test.py @@ -1902,6 +1902,10 @@ def test_pre_read(self): data4 = fd.read() data5 = fd.read() + # This should not use the pre-read feature, to be validated via the logs. + with open(join(dfuse.dir, 'file4'), 'r') as fd: + data6 = fd.read() + if dfuse.stop(): self.fatal_errors = True print(data0) @@ -1911,6 +1915,7 @@ def test_pre_read(self): assert raw_data0 == data3 assert raw_data1 == data4 assert len(data5) == 0 + assert raw_data1 == data6 def test_two_mounts(self): """Create two mounts, and check that a file created in one can be read from the other""" From 332628c4feabbbc8de6c0f7d48b2448f1c0c0074 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 24 Apr 2023 15:16:26 +0000 Subject: [PATCH 05/20] Fix another crash. Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/read.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/dfuse/ops/read.c b/src/client/dfuse/ops/read.c index 55c2941541a..9f0355f509b 100644 --- a/src/client/dfuse/ops/read.c +++ b/src/client/dfuse/ops/read.c @@ -278,6 +278,7 @@ dfuse_pre_read(struct dfuse_projection_info *fs_handle, struct dfuse_obj_hdl *oh if (ev) { daos_event_fini(&ev->de_ev); d_slab_release(eqt->de_read_slab, ev); + oh->doh_readahead->dra_ev = NULL; } D_MUTEX_UNLOCK(&oh->doh_readahead->dra_lock); } From ade793eab822a3ab9f506332e4de8c7695cfe9a5 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 24 Apr 2023 16:17:55 +0000 Subject: [PATCH 06/20] Fix another crash. 
Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/read.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/client/dfuse/ops/read.c b/src/client/dfuse/ops/read.c index 9f0355f509b..63a65d88a98 100644 --- a/src/client/dfuse/ops/read.c +++ b/src/client/dfuse/ops/read.c @@ -62,12 +62,16 @@ dfuse_readahead_reply(fuse_req_t req, size_t len, off_t position, struct dfuse_o } if (!oh->doh_linear_read || oh->doh_readahead->dra_ev == NULL) { - DFUSE_TRA_ERROR(oh, "Readahead disabled"); + DFUSE_TRA_DEBUG(oh, "Readahead disabled"); return false; } if (oh->doh_linear_read_pos != position) { - DFUSE_TRA_ERROR(oh, "disabling readahead"); + DFUSE_TRA_DEBUG(oh, "disabling readahead"); + daos_event_fini(&oh->doh_readahead->dra_ev); + d_slab_release(oh->doh_readahead->dra_ev->de_eqt->de_read_slab, + oh->doh_readahead->dra_ev); + oh->doh_readahead->dra_ev = NULL; return false; } From 2871a77c9bb6ce29848a6a0b67ce84ee220a96d9 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 24 Apr 2023 16:31:30 +0000 Subject: [PATCH 07/20] Fix compile. 
Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/read.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/dfuse/ops/read.c b/src/client/dfuse/ops/read.c index 63a65d88a98..d73e74e0eee 100644 --- a/src/client/dfuse/ops/read.c +++ b/src/client/dfuse/ops/read.c @@ -68,7 +68,7 @@ dfuse_readahead_reply(fuse_req_t req, size_t len, off_t position, struct dfuse_o if (oh->doh_linear_read_pos != position) { DFUSE_TRA_DEBUG(oh, "disabling readahead"); - daos_event_fini(&oh->doh_readahead->dra_ev); + daos_event_fini(&oh->doh_readahead->dra_ev->de_ev); d_slab_release(oh->doh_readahead->dra_ev->de_eqt->de_read_slab, oh->doh_readahead->dra_ev); oh->doh_readahead->dra_ev = NULL; return false; From 3b9de4af397ebb0c8b21e969c244228c34f4786a Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 27 Apr 2023 08:55:56 +0000 Subject: [PATCH 08/20] Test properly with separate data cache times. Required-githooks: true Signed-off-by: Ashley Pittman --- ci/unit/test_nlt_node.sh | 4 ++-- src/client/dfuse/dfuse.h | 10 +++++----- src/client/dfuse/dfuse_core.c | 4 ++-- src/client/dfuse/ops/open.c | 16 +++++++++++----- 4 files changed, 20 insertions(+), 14 deletions(-) diff --git a/ci/unit/test_nlt_node.sh b/ci/unit/test_nlt_node.sh index 98b11e38b3e..3fa058eaaff 100755 --- a/ci/unit/test_nlt_node.sh +++ b/ci/unit/test_nlt_node.sh @@ -21,7 +21,7 @@ sudo bash -c ". ./utils/sl/setup_local.sh; ./utils/setup_daos_server_helper.sh" # NLT will mount /mnt/daos itself. 
# TODO: Enable this for DAOS-10905 -# ./utils/node_local_test.py --max-log-size 800MiB --dfuse-dir /localhome/jenkins/ \ +# ./utils/node_local_test.py --max-log-size 900MiB --dfuse-dir /localhome/jenkins/ \ # --server-valgrind all -./utils/node_local_test.py --max-log-size 800MiB --dfuse-dir /localhome/jenkins/ all +./utils/node_local_test.py --max-log-size 900MiB --dfuse-dir /localhome/jenkins/ all diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index 7952880549f..a6e7a300e6d 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -557,7 +557,7 @@ struct fuse_lowlevel_ops dfuse_ops; #define DFUSE_REPLY_OPEN(oh, req, _fi) \ do { \ int __rc; \ - DFUSE_TRA_DEBUG(oh, "Returning open"); \ + DFUSE_TRA_DEBUG(oh, "Returning open, keep_cache %d", (_fi)->keep_cache); \ __rc = fuse_reply_open(req, _fi); \ if (__rc != 0) \ DFUSE_TRA_ERROR(oh, "fuse_reply_open returned %d:%s", __rc, \ @@ -725,21 +725,21 @@ dfuse_cache_evict_dir(struct dfuse_projection_info *fs_handle, struct dfuse_inod void dfuse_mcache_set_time(struct dfuse_inode_entry *ie); -/* Set the cache as invalid */ +/* Set the metadata cache as invalid */ void dfuse_mcache_evict(struct dfuse_inode_entry *ie); -/* Check the cache setting against a given timeout, and return time left */ +/* Check the metadata cache setting against a given timeout, and return time left */ bool dfuse_mcache_get_valid(struct dfuse_inode_entry *ie, double max_age, double *timeout); /* Data caching functions */ -/* Mark the cache as up-to-date from now */ +/* Mark the data cache as up-to-date from now */ void dfuse_dcache_set_time(struct dfuse_inode_entry *ie); -/* Set the cache as invalid */ +/* Set the data cache as invalid */ void dfuse_dcache_evict(struct dfuse_inode_entry *ie); diff --git a/src/client/dfuse/dfuse_core.c b/src/client/dfuse/dfuse_core.c index b068c82e617..f1db26def13 100644 --- a/src/client/dfuse/dfuse_core.c +++ b/src/client/dfuse/dfuse_core.c @@ -1176,7 +1176,7 @@ 
dfuse_read_event_reset(void *arg) int rc; if (ev->de_iov.iov_buf == NULL) { - D_ALLOC(ev->de_iov.iov_buf, DFUSE_MAX_READ); + D_ALLOC_NZ(ev->de_iov.iov_buf, DFUSE_MAX_READ); if (ev->de_iov.iov_buf == NULL) return false; @@ -1200,7 +1200,7 @@ dfuse_write_event_reset(void *arg) int rc; if (ev->de_iov.iov_buf == NULL) { - D_ALLOC(ev->de_iov.iov_buf, DFUSE_MAX_READ); + D_ALLOC_NZ(ev->de_iov.iov_buf, DFUSE_MAX_READ); if (ev->de_iov.iov_buf == NULL) return false; diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index c1b8d30c126..6b6812e114d 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -55,11 +55,16 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) if (fi->flags & O_DIRECT) fi_out.direct_io = 1; - if (atomic_load_relaxed(&ie->ie_open_count) > 0) { - fi_out.keep_cache = 1; - } else if (dfuse_dcache_get_valid(ie, ie->ie_dfs->dfc_data_timeout)) { + /* If the file is already open or (potentially) in cache then allow any existing + * kernel cache to be used. If not then use pre-read. + * This should mean that pre-read is only used on the first read, and on files + * which pre-existed in the container. 
+ */ + if (atomic_load_relaxed(&ie->ie_open_count) > 0 || + dfuse_dcache_get_valid(ie, ie->ie_dfs->dfc_data_timeout)) { fi_out.keep_cache = 1; - prefetch = true; + } else { + prefetch = true; } } else { fi_out.direct_io = 1; @@ -160,8 +165,9 @@ dfuse_cb_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) if (atomic_load_relaxed(&oh->doh_write_count) != 0) { if (oh->doh_caching) { if (il_calls == 0) { - DFUSE_TRA_DEBUG(oh, "Evicting metadata cache"); + DFUSE_TRA_DEBUG(oh, "Evicting metadata cache, setting data cache"); dfuse_mcache_evict(oh->doh_ie); + dfuse_dcache_set_time(oh->doh_ie); } else { DFUSE_TRA_DEBUG(oh, "Evicting cache"); dfuse_cache_evict(oh->doh_ie); From 0da5cfd80a8f7b9b6747b245e51c6cf280052204 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 27 Apr 2023 08:58:07 +0000 Subject: [PATCH 09/20] Fix a formatting issue. Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/open.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index 6b6812e114d..bf94b55299d 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -12,7 +12,7 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { struct dfuse_projection_info *fs_handle = fuse_req_userdata(req); struct dfuse_inode_entry *ie; - d_list_t *rlink; + d_list_t *rlink; struct dfuse_obj_hdl *oh = NULL; struct fuse_file_info fi_out = {0}; int rc; From ea1784a5bce1e101902a345d7399e079dff23bf4 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Tue, 2 May 2023 10:32:10 +0000 Subject: [PATCH 10/20] Update to read up to max buffer size. 
Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- ci/unit/test_nlt_node.sh | 2 +- src/client/dfuse/dfuse.h | 10 +++------- src/client/dfuse/ops/open.c | 8 ++------ src/client/dfuse/ops/read.c | 11 ++++------- 4 files changed, 10 insertions(+), 21 deletions(-) diff --git a/ci/unit/test_nlt_node.sh b/ci/unit/test_nlt_node.sh index 3fa058eaaff..75f818ded4c 100755 --- a/ci/unit/test_nlt_node.sh +++ b/ci/unit/test_nlt_node.sh @@ -21,7 +21,7 @@ sudo bash -c ". ./utils/sl/setup_local.sh; ./utils/setup_daos_server_helper.sh" # NLT will mount /mnt/daos itself. # TODO: Enable this for DAOS-10905 -# ./utils/node_local_test.py --max-log-size 900MiB --dfuse-dir /localhome/jenkins/ \ +# ./utils/node_local_test.py --max-log-size 800MiB --dfuse-dir /localhome/jenkins/ \ # --server-valgrind all ./utils/node_local_test.py --max-log-size 900MiB --dfuse-dir /localhome/jenkins/ all diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index a6e7a300e6d..ade3ce219f9 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -108,17 +108,13 @@ struct dfuse_readdir_entry { * read callback. Read requests then take the lock to ensure the dfs read is complete and reply * directly with the data in the buffer. * - * This works up to the buffer size minus one, the pre-read tries to read the expected file size - * plus one so if the file size has changed then dfuse will detect this and back off to regular - * read. + * This works up to the buffer size, the pre-read tries to read the expected file size is smaller + * then dfuse will detect this and back off to regular read, however it will not detect if the + * file has grown in size. * * A dfuse_event is hung off this new descriptor and these come from the same pool as regular reads, * this buffer is kept as long as it's needed but released as soon as possible, either on error or * when EOF is returned to the kernel. If it's still present on release then it's freed then. 
- * - * TODO: The current issue with pread is that DFuse keeps a single cache time per inode and this - * is used for both data and metadata. For this feature to work the inode cache needs to be valid - * and the data cache needs to be empty. */ struct dfuse_read_ahead { pthread_mutex_t dra_lock; diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index bf94b55299d..ce4b8197ac4 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -93,12 +93,8 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) atomic_fetch_add_relaxed(&ie->ie_open_count, 1); - /* Enable this for smaller files, up to but not including the max read size. This means - * that files of 1MiB or larger will not be pre-read but smaller files will. One extra - * unused byte at the end of the buffer will be used to detect if the file has grown - * since dfuse last observed the size. - */ - if (prefetch && ie->ie_stat.st_size > 0 && ie->ie_stat.st_size < DFUSE_MAX_READ) { + /* Enable this for files up to the max read size. */ + if (prefetch && ie->ie_stat.st_size > 0 && ie->ie_stat.st_size <= DFUSE_MAX_READ) { D_ALLOC_PTR(oh->doh_readahead); if (oh->doh_readahead) { D_MUTEX_INIT(&oh->doh_readahead->dra_lock, 0); diff --git a/src/client/dfuse/ops/read.c b/src/client/dfuse/ops/read.c index d73e74e0eee..d0e10c3d989 100644 --- a/src/client/dfuse/ops/read.c +++ b/src/client/dfuse/ops/read.c @@ -221,8 +221,9 @@ dfuse_cb_pre_read_complete(struct dfuse_event *ev) oh->doh_readahead->dra_ev = NULL; } - /* If the length is not as expected then the stat size was incorrect so do not use this - * read and fallback into the normal read case. + /* If the length is not as expected then the file has been modified since the last stat so + * discard this cache and use regular reads. Note that this will only detect files which + * have shrunk in size, not grown. 
*/ if (ev->de_len != ev->de_readahead_len) { daos_event_fini(&ev->de_ev); @@ -250,11 +251,7 @@ dfuse_pre_read(struct dfuse_projection_info *fs_handle, struct dfuse_obj_hdl *oh if (ev == NULL) D_GOTO(err, rc = ENOMEM); - /* Request a read one byte bigger than the expected file size, this way we should see - * a truncated read and through that will be able to detect if the file has been - * modified - */ - ev->de_iov.iov_len = len + 1; + ev->de_iov.iov_len = len; ev->de_req = 0; ev->de_sgl.sg_nr = 1; ev->de_oh = oh; From 0b6c1590dcc707d41972e5e4feb76832273f4d66 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 8 Jun 2023 15:12:10 +0000 Subject: [PATCH 11/20] fix formatting Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/dfuse.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index f77f9b6f2a0..a961d9d83d2 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -85,7 +85,6 @@ dfuse_launch_fuse(struct dfuse_projection_info *fs_handle, struct fuse_args *arg struct dfuse_inode_entry; - /* Preread. * * If a file is opened when caching is on but the file is not cached and the size small enough From 27c4e7df6c41319e1003fb5220a7c84b8aa32c20 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 3 Jul 2023 16:24:44 +0000 Subject: [PATCH 12/20] Improve the decision on when to readahead. Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/dfuse.h | 9 ++++++++ src/client/dfuse/ops/create.c | 2 ++ src/client/dfuse/ops/open.c | 41 +++++++++++++++++++++++++++-------- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index 49453973250..7c5c4172aaf 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -131,6 +131,8 @@ struct dfuse_obj_hdl { /** the inode entry for the file */ struct dfuse_inode_entry *doh_ie; + struct dfuse_inode_entry *doh_parent_dir; + /** readdir handle. 
*/ struct dfuse_readdir_hdl *doh_rd; @@ -826,6 +828,13 @@ struct dfuse_inode_entry { /** File has been unlinked from daos */ bool ie_unlinked; + + /** Last file closed in this directory was read linearly. Directories only. + * + * Set on close() of a file in the directory to the value of linear_read from the fh. + * Checked on open of a file to determine if pre-caching is used. + */ + ATOMIC bool ie_linear_read; }; extern char *duns_xattr_name; diff --git a/src/client/dfuse/ops/create.c b/src/client/dfuse/ops/create.c index 4959e4dcabe..7b0c387b9ae 100644 --- a/src/client/dfuse/ops/create.c +++ b/src/client/dfuse/ops/create.c @@ -111,6 +111,8 @@ dfuse_cb_create(fuse_req_t req, struct dfuse_inode_entry *parent, DFUSE_TRA_DEBUG(parent, "Parent:%#lx '%s'", parent->ie_stat.st_ino, name); + atomic_store_relaxed(&parent->ie_linear_read, false); + /* O_LARGEFILE should always be set on 64 bit systems, and in fact is * defined to 0 so IOF defines LARGEFILE to the value that O_LARGEFILE * would otherwise be using and check that is set. 
diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index 62097c570ab..f2ae089ae57 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -10,17 +10,18 @@ void dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { - struct dfuse_projection_info *fs_handle = fuse_req_userdata(req); + struct dfuse_info *dfuse_info = fuse_req_userdata(req); struct dfuse_inode_entry *ie; d_list_t *rlink; + d_list_t *plink; struct dfuse_obj_hdl *oh = NULL; struct fuse_file_info fi_out = {0}; int rc; bool prefetch = false; - rlink = d_hash_rec_find(&fs_handle->dpi_iet, &ino, sizeof(ino)); + rlink = d_hash_rec_find(&dfuse_info->dpi_iet, &ino, sizeof(ino)); if (!rlink) { - DFUSE_REPLY_ERR_RAW(fs_handle, req, ENOENT); + DFUSE_REPLY_ERR_RAW(dfuse_info, req, ENOENT); return; } ie = container_of(rlink, struct dfuse_inode_entry, ie_htl); @@ -31,12 +32,16 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) DFUSE_TRA_UP(oh, ie, "open handle"); - dfuse_open_handle_init(fs_handle, oh, ie); + dfuse_open_handle_init(dfuse_info, oh, ie); + + plink = d_hash_rec_find(&dfuse_info->dpi_iet, &ie->ie_parent, sizeof(ie->ie_parent)); + if (plink) + oh->doh_parent_dir = container_of(plink, struct dfuse_inode_entry, ie_htl); /* Upgrade fd permissions from O_WRONLY to O_RDWR if wb caching is * enabled so the kernel can do read-modify-write */ - if (ie->ie_dfs->dfc_data_timeout != 0 && fs_handle->di_wb_cache && + if (ie->ie_dfs->dfc_data_timeout != 0 && dfuse_info->di_wb_cache && (fi->flags & O_ACCMODE) == O_WRONLY) { DFUSE_TRA_DEBUG(ie, "Upgrading fd to O_RDRW"); fi->flags &= ~O_ACCMODE; @@ -93,6 +98,12 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) atomic_fetch_add_relaxed(&ie->ie_open_count, 1); + if (prefetch && oh->doh_parent_dir) { + bool use_linear_read = atomic_load_relaxed(&oh->doh_parent_dir->ie_linear_read); + + prefetch = use_linear_read; + } + /* Enable this for files up to the max read 
size. */ if (prefetch && ie->ie_stat.st_size > 0 && ie->ie_stat.st_size <= DFUSE_MAX_READ) { D_ALLOC_PTR(oh->doh_readahead); @@ -102,16 +113,16 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) } } - d_hash_rec_decref(&fs_handle->dpi_iet, rlink); + d_hash_rec_decref(&dfuse_info->dpi_iet, rlink); DFUSE_REPLY_OPEN(oh, req, &fi_out); if (oh->doh_readahead) - dfuse_pre_read(fs_handle, oh); + dfuse_pre_read(dfuse_info, oh); return; err: - d_hash_rec_decref(&fs_handle->dpi_iet, rlink); - dfuse_oh_free(fs_handle, oh); + d_hash_rec_decref(&dfuse_info->dpi_iet, rlink); + dfuse_oh_free(dfuse_info, oh); DFUSE_REPLY_ERR_RAW(ie, req, rc); } @@ -193,5 +204,17 @@ dfuse_cb_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) DFUSE_REPLY_ZERO(oh, req); else DFUSE_REPLY_ERR_RAW(oh, req, rc); + if (oh->doh_parent_dir) { + bool use_linear_read = false; + + if (oh->doh_linear_read && oh->doh_linear_read_eof) + use_linear_read = true; + + DFUSE_TRA_DEBUG(oh->doh_parent_dir, "Setting linear_read to %d", use_linear_read); + + atomic_store_relaxed(&oh->doh_parent_dir->ie_linear_read, use_linear_read); + + d_hash_rec_decref(&dfuse_info->dpi_iet, &oh->doh_parent_dir->ie_htl); + } dfuse_oh_free(dfuse_info, oh); } From 238a793f9d1e032d6e3a2e8d18b387452f4b375e Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 3 Jul 2023 16:44:41 +0000 Subject: [PATCH 13/20] Update comment. Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/dfuse.h | 51 ++++++++++++++++++++++++++++--------- src/client/dfuse/ops/open.c | 31 +++++++++------------- 2 files changed, 51 insertions(+), 31 deletions(-) diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index 7c5c4172aaf..75ef013b04c 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -91,23 +91,32 @@ struct dfuse_inode_entry; /* Preread. 
* - * If a file is opened when caching is on but the file is not cached and the size small enough - * to fit in a single buffer then start a pre-read of the file on open, then when reads do occur - * they can happen directly from the buffer. For 'linear reads' of a file this means that the - * read can be triggered sooner and performed as one dfs request. Make use of the pre-read - * code to only use this for trivial reads, if a file is not read linearly or it's written to - * then back off to the regular behavior, which will likely use the kernel cache. + * DFuse can start a pre-read of the file on open, then when reads do occur they can happen directly + * from the buffer. For 'linear reads' of a file this means that the read can be triggered sooner + * and performed as one dfs request. Make use of the pre-read code to only use this for trivial + * reads, if a file is not read linearly or it's written to then back off to the regular behavior, + * which will likely use the kernel cache. + * + * Pre-read is enabled when: + * Caching is enabled + * The file is not cached + * The file is small enough to fit in one buffer (1mb) + * The previous file from the same directory was read linearly. + * Similar to the READDIR_PLUS_AUTO logic this feature is enabled bassed on the I/O pattern of the + * most recent access to the parent directory, general I/O workloads or interception library use are + * unlikely to trigger this code however something that is reading the entire contents of a + * directory tree should. * * This works by creating a new descriptor which is pointed to by the open handle, on open dfuse * decides if it will use pre-read and if so allocate a new descriptor, add it to the open handle - * and then once it's replied to the open immediately issue a read. The new descriptor includes - * a lock which is locked by open before it replies to the kernel request and unlocked by the dfs - * read callback. 
Read requests then take the lock to ensure the dfs read is complete and reply - * directly with the data in the buffer. + * and then once it's replied to the open immediately issue a read. The new descriptor includes a + * lock which is locked by open before it replies to the kernel request and unlocked by the dfs read + * callback. Read requests then take the lock to ensure the dfs read is complete and reply directly + * with the data in the buffer. * * This works up to the buffer size, the pre-read tries to read the expected file size is smaller - * then dfuse will detect this and back off to regular read, however it will not detect if the - * file has grown in size. + * then dfuse will detect this and back off to regular read, however it will not detect if the file + * has grown in size. * * A dfuse_event is hung off this new descriptor and these come from the same pool as regular reads, * this buffer is kept as long as it's needed but released as soon as possible, either on error or @@ -837,6 +846,24 @@ struct dfuse_inode_entry { ATOMIC bool ie_linear_read; }; +static inline struct dfuse_inode_entry * +dfuse_inode_lookup(struct dfuse_info *dfuse_info, fuse_ino_t ino) +{ + d_list_t *rlink; + + rlink = d_hash_rec_find(&dfuse_info->dpi_iet, &ino, sizeof(ino)); + if (!rlink) + return NULL; + + return container_of(rlink, struct dfuse_inode_entry, ie_htl); +} + +static inline void +dfuse_inode_decref(struct dfuse_info *dfuse_info, struct dfuse_inode_entry *ie) +{ + d_hash_rec_decref(&dfuse_info->dpi_iet, &ie->ie_htl); +} + extern char *duns_xattr_name; /* Generate the inode to use for this dfs object. 
This is generating a single diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index f2ae089ae57..d366dd086dd 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -10,14 +10,13 @@ void dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { - struct dfuse_info *dfuse_info = fuse_req_userdata(req); - struct dfuse_inode_entry *ie; - d_list_t *rlink; - d_list_t *plink; - struct dfuse_obj_hdl *oh = NULL; - struct fuse_file_info fi_out = {0}; - int rc; - bool prefetch = false; + struct dfuse_info *dfuse_info = fuse_req_userdata(req); + struct dfuse_inode_entry *ie; + d_list_t *rlink; + struct dfuse_obj_hdl *oh; + struct fuse_file_info fi_out = {0}; + int rc; + bool prefetch = false; rlink = d_hash_rec_find(&dfuse_info->dpi_iet, &ino, sizeof(ino)); if (!rlink) { @@ -34,9 +33,7 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) dfuse_open_handle_init(dfuse_info, oh, ie); - plink = d_hash_rec_find(&dfuse_info->dpi_iet, &ie->ie_parent, sizeof(ie->ie_parent)); - if (plink) - oh->doh_parent_dir = container_of(plink, struct dfuse_inode_entry, ie_htl); + oh->doh_parent_dir = dfuse_inode_lookup(dfuse_info, ie->ie_parent); /* Upgrade fd permissions from O_WRONLY to O_RDWR if wb caching is * enabled so the kernel can do read-modify-write @@ -98,14 +95,10 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) atomic_fetch_add_relaxed(&ie->ie_open_count, 1); - if (prefetch && oh->doh_parent_dir) { - bool use_linear_read = atomic_load_relaxed(&oh->doh_parent_dir->ie_linear_read); - - prefetch = use_linear_read; - } - /* Enable this for files up to the max read size. 
*/ - if (prefetch && ie->ie_stat.st_size > 0 && ie->ie_stat.st_size <= DFUSE_MAX_READ) { + if (prefetch && oh->doh_parent_dir && + atomic_load_relaxed(&oh->doh_parent_dir->ie_linear_read) && ie->ie_stat.st_size > 0 && + ie->ie_stat.st_size <= DFUSE_MAX_READ) { D_ALLOC_PTR(oh->doh_readahead); if (oh->doh_readahead) { D_MUTEX_INIT(&oh->doh_readahead->dra_lock, 0); @@ -214,7 +207,7 @@ dfuse_cb_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) atomic_store_relaxed(&oh->doh_parent_dir->ie_linear_read, use_linear_read); - d_hash_rec_decref(&dfuse_info->dpi_iet, &oh->doh_parent_dir->ie_htl); + dfuse_inode_decref(dfuse_info, oh->doh_parent_dir); } dfuse_oh_free(dfuse_info, oh); } From 7b8aafd8f36fbce5efa2c920b4a84fef73f7a11c Mon Sep 17 00:00:00 2001 From: wangdi Date: Mon, 3 Jul 2023 09:01:49 -0700 Subject: [PATCH 14/20] DAOS-13493 rebuild: upgrate the whole group (#12550) During object upgrade, dkey might be migrate to different shards within the group due to EC parity rotation, so all shards within the group should do fetch and update during migration, otherwise some dkeys might be missing after upgrade. Upgrade should do group special enumeration for the moment, since 2.4 does not change dkey->group mapping. Add tests to verify it. 
Signed-off-by: Di Wang # ------------------------ >8 ------------------------ Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- src/include/daos_srv/daos_engine.h | 2 +- src/object/obj_layout.c | 32 +++++++++---- src/object/srv_obj_migrate.c | 17 +++---- src/rebuild/scan.c | 4 +- src/tests/suite/daos_upgrade.c | 72 ++++++++++++++++++++++++++++++ 5 files changed, 104 insertions(+), 23 deletions(-) diff --git a/src/include/daos_srv/daos_engine.h b/src/include/daos_srv/daos_engine.h index 7175a6df1b1..ab3d87ccc55 100644 --- a/src/include/daos_srv/daos_engine.h +++ b/src/include/daos_srv/daos_engine.h @@ -779,7 +779,7 @@ ds_migrate_stop(struct ds_pool *pool, uint32_t ver, unsigned int generation); int obj_layout_diff(struct pl_map *map, daos_unit_oid_t oid, uint32_t new_ver, uint32_t old_ver, - struct daos_obj_md *md, uint32_t *tgt, uint32_t *shard_p); + struct daos_obj_md *md, uint32_t *tgts, uint32_t *shards, int array_size); /** Server init state (see server_init) */ enum dss_init_state { diff --git a/src/object/obj_layout.c b/src/object/obj_layout.c index 030b0f09c4d..189261ad31e 100644 --- a/src/object/obj_layout.c +++ b/src/object/obj_layout.c @@ -40,7 +40,7 @@ obj_pl_place(struct pl_map *map, uint16_t layout_gl_ver, struct daos_obj_md *md, /* Find out the difference between different layouts */ int obj_layout_diff(struct pl_map *map, daos_unit_oid_t oid, uint32_t new_ver, uint32_t old_ver, - struct daos_obj_md *md, uint32_t *tgt, uint32_t *shard_p) + struct daos_obj_md *md, uint32_t *tgts, uint32_t *shards, int array_size) { struct pl_obj_layout *new_layout = NULL; struct pl_obj_layout *old_layout = NULL; @@ -58,18 +58,32 @@ obj_layout_diff(struct pl_map *map, daos_unit_oid_t oid, uint32_t new_ver, uint3 if (rc) D_GOTO(out, rc); - if (new_layout->ol_shards[shard].po_target != old_layout->ol_shards[shard].po_target) { - *tgt = new_layout->ol_shards[shard].po_target; - *shard_p = shard; - D_GOTO(out, rc = 1); - } - /* If the 
new layout changes dkey placement, i.e. dkey->grp, dkey->ec_start changes, * then all shards needs to be changed. */ if (new_ver == 1 && daos_obj_id_is_ec(oid.id_pub)) { - *tgt = new_layout->ol_shards[shard].po_target; - *shard_p = shard; + struct daos_oclass_attr *oc; + unsigned int grp_size; + unsigned int grp_start; + int i; + + oc = daos_oclass_attr_find(oid.id_pub, NULL); + D_ASSERT(oc != NULL); + grp_size = daos_oclass_grp_size(oc); + + D_ASSERT(grp_size < array_size); + grp_start = (shard / grp_size) * grp_size; + for (i = 0; i < grp_size; i++) { + tgts[i] = new_layout->ol_shards[grp_start + i].po_target; + shards[i] = grp_start + i; + D_DEBUG(DB_TRACE, "i %d tgts[i] %u shards %u grp_size %u\n", i, tgts[i], shards[i], grp_size); + } + D_GOTO(out, rc = grp_size); + } + + if (new_layout->ol_shards[shard].po_target != old_layout->ol_shards[shard].po_target) { + *tgts = new_layout->ol_shards[shard].po_target; + *shards = shard; D_GOTO(out, rc = 1); } diff --git a/src/object/srv_obj_migrate.c b/src/object/srv_obj_migrate.c index c9519ba2139..c08f290190b 100644 --- a/src/object/srv_obj_migrate.c +++ b/src/object/srv_obj_migrate.c @@ -2588,19 +2588,12 @@ migrate_one_epoch_object(daos_epoch_range_t *epr, struct migrate_pool_tls *tls, memset(&anchor, 0, sizeof(anchor)); memset(&akey_anchor, 0, sizeof(akey_anchor)); memset(&dkey_anchor, 0, sizeof(dkey_anchor)); - if (tls->mpt_opc == RB_OP_UPGRADE) { - enum_flags = DIOF_TO_LEADER | DIOF_WITH_SPEC_EPOCH | - DIOF_FOR_MIGRATION; + if (tls->mpt_opc == RB_OP_UPGRADE) unpack_arg.new_layout_ver = tls->mpt_new_layout_ver; - if (!daos_oclass_is_ec(&unpack_arg.oc_attr)) { - dc_obj_shard2anchor(&dkey_anchor, arg->shard); - enum_flags |= DIOF_TO_SPEC_GROUP; - } - } else { - dc_obj_shard2anchor(&dkey_anchor, arg->shard); - enum_flags = DIOF_TO_LEADER | DIOF_WITH_SPEC_EPOCH | - DIOF_TO_SPEC_GROUP | DIOF_FOR_MIGRATION; - } + + dc_obj_shard2anchor(&dkey_anchor, arg->shard); + enum_flags = DIOF_TO_LEADER | DIOF_WITH_SPEC_EPOCH | + 
DIOF_TO_SPEC_GROUP | DIOF_FOR_MIGRATION; if (daos_oclass_is_ec(&unpack_arg.oc_attr)) { diff --git a/src/rebuild/scan.c b/src/rebuild/scan.c index a738feab06d..7259062efae 100644 --- a/src/rebuild/scan.c +++ b/src/rebuild/scan.c @@ -738,7 +738,8 @@ rebuild_obj_scan_cb(daos_handle_t ch, vos_iter_entry_t *ent, case RB_OP_UPGRADE: if (oid.id_layout_ver < rpt->rt_new_layout_ver) { rc = obj_layout_diff(map, oid, rpt->rt_new_layout_ver, - arg->co_props.dcp_obj_version, &md, tgts, shards); + arg->co_props.dcp_obj_version, &md, + tgts, shards, LOCAL_ARRAY_SIZE); /* Then only upgrade the layout version */ if (rc == 0) { rc = vos_obj_layout_upgrade(param->ip_hdl, oid, @@ -758,6 +759,7 @@ rebuild_obj_scan_cb(daos_handle_t ch, vos_iter_entry_t *ent, D_GOTO(out, rc); } + D_DEBUG(DB_REBUILD, "rebuild obj "DF_UOID" rebuild_nr %d\n", DP_UOID(oid), rc); rebuild_nr = rc; rc = 0; for (i = 0; i < rebuild_nr; i++) { diff --git a/src/tests/suite/daos_upgrade.c b/src/tests/suite/daos_upgrade.c index 0fdcd6d5c94..1f76def3b1e 100644 --- a/src/tests/suite/daos_upgrade.c +++ b/src/tests/suite/daos_upgrade.c @@ -88,6 +88,76 @@ upgrade_ec_parity_rotate(void **state) test_teardown((void **)&new_arg); } +static void +upgrade_ec_parity_rotate_single_dkey(void **state) +{ + test_arg_t *arg = *state; + test_arg_t *new_arg = NULL; + struct ioreq req; + daos_obj_id_t oid; + char buf[10]; + int rc; + + if (!test_runable(arg, 6)) + return; + + if (arg->myrank == 0) { + rc = daos_debug_set_params(arg->group, -1, DMG_KEY_FAIL_LOC, + DAOS_FAIL_POOL_CREATE_VERSION | DAOS_FAIL_ALWAYS, + 0, NULL); + assert_rc_equal(rc, 0); + rc = daos_debug_set_params(arg->group, -1, DMG_KEY_FAIL_VALUE, + 0, 0, 0); + assert_rc_equal(rc, 0); + } + + /* create/connect another pool */ + rc = test_setup((void **)&new_arg, SETUP_CONT_CONNECT, arg->multi_rank, + SMALL_POOL_SIZE, 0, NULL); + assert_rc_equal(rc, 0); + + oid = daos_test_oid_gen(new_arg->coh, OC_EC_4P1GX, 0, 0, new_arg->myrank); + ioreq_init(&req, new_arg->coh, oid, 
DAOS_IOD_ARRAY, new_arg); + + insert_single("upgrade_dkey", "upgrade_akey", 0, "data", + strlen("data") + 1, DAOS_TX_NONE, &req); + + insert_single("upgrade_dkey1", "upgrade_akey1", 0, "data", + strlen("data") + 1, DAOS_TX_NONE, &req); + + + ioreq_fini(&req); + + if (arg->myrank == 0) { + rc = daos_debug_set_params(arg->group, -1, DMG_KEY_FAIL_LOC, + DAOS_FORCE_OBJ_UPGRADE | DAOS_FAIL_ALWAYS, + 0, NULL); + assert_rc_equal(rc, 0); + } + + rc = daos_pool_upgrade(new_arg->pool.pool_uuid); + assert_rc_equal(rc, 0); + + print_message("sleep 50 seconds for upgrade to finish!\n"); + sleep(50); + + rebuild_pool_connect_internal(new_arg); + ioreq_init(&req, new_arg->coh, oid, DAOS_IOD_ARRAY, new_arg); + memset(buf, 0, 10); + lookup_single("upgrade_dkey", "upgrade_akey", 0, + buf, 10, DAOS_TX_NONE, &req); + assert_int_equal(req.iod[0].iod_size, strlen(buf) + 1); + assert_string_equal(buf, "data"); + + lookup_single("upgrade_dkey1", "upgrade_akey1", 0, + buf, 10, DAOS_TX_NONE, &req); + assert_int_equal(req.iod[0].iod_size, strlen(buf) + 1); + assert_string_equal(buf, "data"); + + ioreq_fini(&req); + test_teardown((void **)&new_arg); +} + int upgrade_sub_setup(void **state) { @@ -113,6 +183,8 @@ upgrade_sub_setup(void **state) static const struct CMUnitTest upgrade_tests[] = { {"UPGRADE0: upgrade object ec parity layout", upgrade_ec_parity_rotate, upgrade_sub_setup, test_teardown}, + {"UPGRADE1: upgrade single dkey", + upgrade_ec_parity_rotate_single_dkey, upgrade_sub_setup, test_teardown}, }; int From fd554cd7d5d4b3744d1e18ef47b1096a1210e517 Mon Sep 17 00:00:00 2001 From: Michael MacDonald Date: Mon, 3 Jul 2023 16:08:55 -0400 Subject: [PATCH 15/20] DAOS-13471 control: Add JSON-format version to utilities (#12508) Allow consumers to grab structured build/version information. Centralizes JSON output logic to a single implementation in the cmdutil package. Includes updates to control/version.py to use the structured version information instead of scraping stdout. 
Signed-off-by: Michael MacDonald # ------------------------ >8 ------------------------ Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- src/control/build/string.go | 19 ++++ src/control/cmd/daos/acl.go | 12 +-- src/control/cmd/daos/container.go | 38 +++---- src/control/cmd/daos/filesystem.go | 8 +- src/control/cmd/daos/main.go | 97 +++--------------- src/control/cmd/daos/object.go | 4 +- src/control/cmd/daos/pool.go | 20 ++-- src/control/cmd/daos/snapshot.go | 8 +- src/control/cmd/daos/system.go | 4 +- src/control/cmd/daos/util.go | 2 +- src/control/cmd/daos_agent/attachinfo.go | 14 +-- src/control/cmd/daos_agent/main.go | 59 ++++------- src/control/cmd/daos_agent/network.go | 14 +-- src/control/cmd/daos_server/main.go | 28 +++++- src/control/cmd/dmg/auto.go | 7 +- src/control/cmd/dmg/cont.go | 7 +- src/control/cmd/dmg/firmware.go | 13 +-- src/control/cmd/dmg/main.go | 98 +++---------------- src/control/cmd/dmg/network.go | 7 +- src/control/cmd/dmg/pool.go | 47 ++++----- src/control/cmd/dmg/server.go | 7 +- src/control/cmd/dmg/storage.go | 25 ++--- src/control/cmd/dmg/storage_query.go | 21 ++-- src/control/cmd/dmg/support.go | 11 ++- src/control/cmd/dmg/system.go | 67 ++++++------- src/control/cmd/dmg/telemetry.go | 23 ++--- src/control/common/cmdutil/json.go | 92 +++++++++++++++++ src/control/lib/atm/bool.go | 7 ++ src/control/lib/control/network.go | 4 +- .../lib/hardware/hwprov/topology_cmd.go | 14 +-- src/tests/ftest/control/version.py | 70 ++++--------- src/tests/ftest/util/daos_utils.py | 7 +- src/tests/ftest/util/dmg_utils.py | 7 +- src/tests/ftest/util/server_utils_base.py | 6 +- 34 files changed, 407 insertions(+), 460 deletions(-) create mode 100644 src/control/common/cmdutil/json.go diff --git a/src/control/build/string.go b/src/control/build/string.go index dc58b521091..7f8c6a97fde 100644 --- a/src/control/build/string.go +++ b/src/control/build/string.go @@ -7,6 +7,7 @@ package build import ( + "encoding/json" 
"fmt" "strings" ) @@ -36,3 +37,21 @@ func revString(version string) string { func String(name string) string { return fmt.Sprintf("%s version %s", name, revString(DaosVersion)) } + +// MarshalJSON returns a JSON string containing a structured representation of +// the binary build info. +func MarshalJSON(name string) ([]byte, error) { + return json.Marshal(&struct { + Name string `json:"name"` + Version string `json:"version"` + Revision string `json:"revision,omitempty"` + Dirty bool `json:"dirty,omitempty"` + Release bool `json:"release,omitempty"` + }{ + Name: name, + Version: DaosVersion, + Revision: Revision, + Dirty: DirtyBuild, + Release: ReleaseBuild, + }) +} diff --git a/src/control/cmd/daos/acl.go b/src/control/cmd/daos/acl.go index 9b6f1b3673d..d2533f44b47 100644 --- a/src/control/cmd/daos/acl.go +++ b/src/control/cmd/daos/acl.go @@ -121,12 +121,11 @@ func (cmd *aclCmd) getACL(ap *C.struct_cmd_args_s) (*control.AccessControlList, } func (cmd *aclCmd) outputACL(out io.Writer, acl *control.AccessControlList, verbose bool) error { - if cmd.jsonOutputEnabled() { - cmd.wroteJSON.SetTrue() - return outputJSON(out, acl, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(acl, nil) } - _, err := fmt.Fprintf(out, control.FormatACL(acl, verbose)) + _, err := fmt.Fprint(out, control.FormatACL(acl, verbose)) return err } @@ -406,9 +405,8 @@ func (cmd *containerSetOwnerCmd) Execute(args []string) error { cmd.ContainerID()) } - if cmd.jsonOutputEnabled() { - cmd.wroteJSON.SetTrue() - return outputJSON(os.Stdout, nil, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(nil, nil) } var contID string diff --git a/src/control/cmd/daos/container.go b/src/control/cmd/daos/container.go index a5494dc2ea3..48a3da8bd09 100644 --- a/src/control/cmd/daos/container.go +++ b/src/control/cmd/daos/container.go @@ -314,8 +314,8 @@ func (cmd *containerCreateCmd) Execute(_ []string) (err error) { ci.ContainerLabel = cmd.Args.Label } - if cmd.jsonOutputEnabled() { - 
return cmd.outputJSON(ci, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(ci, nil) } var bld strings.Builder @@ -750,8 +750,8 @@ func (cmd *containerListCmd) Execute(_ []string) error { "unable to list containers for pool %s", cmd.PoolID()) } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(contIDs, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(contIDs, nil) } var bld strings.Builder @@ -903,7 +903,7 @@ func (cmd *containerListObjectsCmd) Execute(_ []string) error { for i := C.uint32_t(0); i < readOids; i++ { oid := fmt.Sprintf("%d.%d", oidArr[i].hi, oidArr[i].lo) - if !cmd.jsonOutputEnabled() { + if !cmd.JSONOutputEnabled() { cmd.Infof("%s", oid) continue } @@ -911,8 +911,8 @@ func (cmd *containerListObjectsCmd) Execute(_ []string) error { } } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(oids, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(oids, nil) } return nil @@ -1035,8 +1035,8 @@ func (cmd *containerQueryCmd) Execute(_ []string) error { cmd.contUUID) } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(ci, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(ci, nil) } var bld strings.Builder @@ -1080,8 +1080,8 @@ func (cmd *containerCloneCmd) Execute(_ []string) error { return errors.Wrapf(err, "failed to clone container %s", cmd.Source) } - if cmd.shouldEmitJSON { - return cmd.outputJSON(struct { + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(struct { SourcePool string `json:"src_pool"` SourceCont string `json:"src_cont"` DestPool string `json:"dst_pool"` @@ -1160,11 +1160,11 @@ func (cmd *containerListAttrsCmd) Execute(args []string) error { cmd.ContainerID()) } - if cmd.jsonOutputEnabled() { + if cmd.JSONOutputEnabled() { if cmd.Verbose { - return cmd.outputJSON(attrs.asMap(), nil) + return cmd.OutputJSON(attrs.asMap(), nil) } - return cmd.outputJSON(attrs.asList(), nil) + return cmd.OutputJSON(attrs.asList(), nil) } var bld strings.Builder @@ -1258,12 +1258,12 @@ func (cmd 
*containerGetAttrCmd) Execute(args []string) error { return errors.Wrapf(err, "failed to get attributes from container %s", cmd.ContainerID()) } - if cmd.jsonOutputEnabled() { + if cmd.JSONOutputEnabled() { // Maintain compatibility with older behavior. if len(cmd.Args.Attrs.ParsedProps) == 1 && len(attrs) == 1 { - return cmd.outputJSON(attrs[0], nil) + return cmd.OutputJSON(attrs[0], nil) } - return cmd.outputJSON(attrs, nil) + return cmd.OutputJSON(attrs, nil) } var bld strings.Builder @@ -1398,8 +1398,8 @@ func (cmd *containerGetPropCmd) Execute(args []string) error { } } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(props, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(props, nil) } title := fmt.Sprintf("Properties for container %s", cmd.ContainerID()) diff --git a/src/control/cmd/daos/filesystem.go b/src/control/cmd/daos/filesystem.go index 902d07e91c0..1215ffc72ba 100644 --- a/src/control/cmd/daos/filesystem.go +++ b/src/control/cmd/daos/filesystem.go @@ -70,14 +70,14 @@ func (cmd *fsCopyCmd) Execute(_ []string) error { return errors.Wrapf(err, "failed to copy %s -> %s", cmd.Source, cmd.Dest) } - if cmd.shouldEmitJSON { + if cmd.JSONOutputEnabled() { type CopyStats struct { NumDirs uint64 `json:"num_dirs"` NumFiles uint64 `json:"num_files"` NumLinks uint64 `json:"num_links"` } - return cmd.outputJSON(struct { + return cmd.OutputJSON(struct { SourcePool string `json:"src_pool"` SourceCont string `json:"src_cont"` DestPool string `json:"dst_pool"` @@ -263,7 +263,7 @@ func (cmd *fsGetAttrCmd) Execute(_ []string) error { var oclassName [16]C.char C.daos_oclass_id2name(attrs.doi_oclass_id, &oclassName[0]) - if cmd.jsonOutputEnabled() { + if cmd.JSONOutputEnabled() { jsonAttrs := &struct { ObjClass string `json:"oclass"` ChunkSize uint64 `json:"chunk_size"` @@ -271,7 +271,7 @@ func (cmd *fsGetAttrCmd) Execute(_ []string) error { ObjClass: C.GoString(&oclassName[0]), ChunkSize: uint64(attrs.doi_chunk_size), } - return cmd.outputJSON(jsonAttrs, 
nil) + return cmd.OutputJSON(jsonAttrs, nil) } cmd.Infof("Object Class = %s", C.GoString(&oclassName[0])) diff --git a/src/control/cmd/daos/main.go b/src/control/cmd/daos/main.go index 7469c4a5a8c..377aa22ad1e 100644 --- a/src/control/cmd/daos/main.go +++ b/src/control/cmd/daos/main.go @@ -9,7 +9,6 @@ package main import ( "encoding/json" "fmt" - "io" "os" "path" "runtime/debug" @@ -21,81 +20,9 @@ import ( "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/fault" "github.com/daos-stack/daos/src/control/lib/atm" - "github.com/daos-stack/daos/src/control/lib/daos" "github.com/daos-stack/daos/src/control/logging" ) -type ( - jsonOutputter interface { - enableJsonOutput(bool, io.Writer, *atm.Bool) - jsonOutputEnabled() bool - outputJSON(interface{}, error) error - errorJSON(error) error - } - - jsonOutputCmd struct { - wroteJSON *atm.Bool - writer io.Writer - shouldEmitJSON bool - } -) - -func (cmd *jsonOutputCmd) enableJsonOutput(emitJson bool, w io.Writer, wj *atm.Bool) { - cmd.shouldEmitJSON = emitJson - cmd.writer = w - cmd.wroteJSON = wj -} - -func (cmd *jsonOutputCmd) jsonOutputEnabled() bool { - return cmd.shouldEmitJSON -} - -func outputJSON(out io.Writer, in interface{}, cmdErr error) error { - status := 0 - var errStr *string - if cmdErr != nil { - errStr = func() *string { str := cmdErr.Error(); return &str }() - if s, ok := errors.Cause(cmdErr).(daos.Status); ok { - status = int(s) - } else { - status = int(daos.MiscError) - } - } - - data, err := json.MarshalIndent(struct { - Response interface{} `json:"response"` - Error *string `json:"error"` - Status int `json:"status"` - }{in, errStr, status}, "", " ") - if err != nil { - return err - } - - if _, err = out.Write(append(data, []byte("\n")...)); err != nil { - return err - } - - return cmdErr -} - -func (cmd *jsonOutputCmd) outputJSON(in interface{}, cmdErr error) error { - if cmd.wroteJSON.IsTrue() { - return cmdErr - } - cmd.wroteJSON.SetTrue() - return 
outputJSON(cmd.writer, in, cmdErr) -} - -func errorJSON(err error) error { - return outputJSON(os.Stdout, nil, err) -} - -func (cmd *jsonOutputCmd) errorJSON(err error) error { - return cmd.outputJSON(nil, err) -} - -var _ jsonOutputter = (*jsonOutputCmd)(nil) - type cliOptions struct { Debug bool `long:"debug" description:"enable debug output"` Verbose bool `long:"verbose" description:"enable verbose output (when applicable)"` @@ -109,9 +36,19 @@ type cliOptions struct { ManPage cmdutil.ManCmd `command:"manpage" hidden:"true"` } -type versionCmd struct{} +type versionCmd struct { + cmdutil.JSONOutputCmd +} func (cmd *versionCmd) Execute(_ []string) error { + if cmd.JSONOutputEnabled() { + buf, err := build.MarshalJSON(build.CLIUtilName) + if err != nil { + return err + } + return cmd.OutputJSON(json.RawMessage(buf), nil) + } + fmt.Printf("%s, libdaos v%s\n", build.String(build.CLIUtilName), apiVersion()) os.Exit(0) return nil @@ -157,12 +94,10 @@ or query/manage an object inside a container.` log.Debug("debug output enabled") } - if jsonCmd, ok := cmd.(jsonOutputter); ok { - jsonCmd.enableJsonOutput(opts.JSON, os.Stdout, &wroteJSON) - if opts.JSON { - // disable output on stdout other than JSON - log.ClearLevel(logging.LogLevelInfo) - } + if jsonCmd, ok := cmd.(cmdutil.JSONOutputter); ok && opts.JSON { + jsonCmd.EnableJSONOutput(os.Stdout, &wroteJSON) + // disable output on stdout other than JSON + log.ClearLevel(logging.LogLevelInfo) } if logCmd, ok := cmd.(cmdutil.LogSetter); ok { @@ -228,7 +163,7 @@ or query/manage an object inside a container.` _, err = p.ParseArgs(args) if opts.JSON && wroteJSON.IsFalse() { - return errorJSON(err) + return cmdutil.OutputJSON(os.Stdout, nil, err) } return err } diff --git a/src/control/cmd/daos/object.go b/src/control/cmd/daos/object.go index 542edd894d3..79d14337917 100644 --- a/src/control/cmd/daos/object.go +++ b/src/control/cmd/daos/object.go @@ -149,8 +149,8 @@ func (cmd *objQueryCmd) Execute(_ []string) error { defer 
C.daos_obj_layout_free(cLayout) layout := newObjLayout(oid, cLayout) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(layout, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(layout, nil) } // TODO: Revisit this output to make it more non-developer friendly. diff --git a/src/control/cmd/daos/pool.go b/src/control/cmd/daos/pool.go index b06b8e47288..aab4fb9c4a7 100644 --- a/src/control/cmd/daos/pool.go +++ b/src/control/cmd/daos/pool.go @@ -338,8 +338,8 @@ func (cmd *poolQueryCmd) Execute(_ []string) error { } } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(pqr, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(pqr, nil) } var bld strings.Builder @@ -410,8 +410,8 @@ func (cmd *poolQueryTargetsCmd) Execute(_ []string) error { } } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(infoResp, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(infoResp, nil) } var bld strings.Builder @@ -443,11 +443,11 @@ func (cmd *poolListAttrsCmd) Execute(_ []string) error { "failed to list attributes for pool %s", cmd.poolUUID) } - if cmd.jsonOutputEnabled() { + if cmd.JSONOutputEnabled() { if cmd.Verbose { - return cmd.outputJSON(attrs.asMap(), nil) + return cmd.OutputJSON(attrs.asMap(), nil) } - return cmd.outputJSON(attrs.asList(), nil) + return cmd.OutputJSON(attrs.asList(), nil) } var bld strings.Builder @@ -484,12 +484,12 @@ func (cmd *poolGetAttrCmd) Execute(_ []string) error { return errors.Wrapf(err, "failed to get attributes for pool %s", cmd.PoolID()) } - if cmd.jsonOutputEnabled() { + if cmd.JSONOutputEnabled() { // Maintain compatibility with older behavior. 
if len(cmd.Args.Attrs.ParsedProps) == 1 && len(attrs) == 1 { - return cmd.outputJSON(attrs[0], nil) + return cmd.OutputJSON(attrs[0], nil) } - return cmd.outputJSON(attrs, nil) + return cmd.OutputJSON(attrs, nil) } var bld strings.Builder diff --git a/src/control/cmd/daos/snapshot.go b/src/control/cmd/daos/snapshot.go index 6e5bda3d0be..f06e517e117 100644 --- a/src/control/cmd/daos/snapshot.go +++ b/src/control/cmd/daos/snapshot.go @@ -56,8 +56,8 @@ func (cmd *containerSnapCreateCmd) Execute(args []string) error { return errors.Wrapf(err, "failed to create snapshot of container %s", cmd.ContainerID()) } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(snapshot{ + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(snapshot{ Epoch: uint64(cEpoch), Timestamp: common.FormatTime(daos.HLC(cEpoch).ToTime()), Name: cmd.Name, @@ -229,8 +229,8 @@ func (cmd *containerSnapListCmd) Execute(args []string) error { return err } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(snaps, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(snaps, nil) } cmd.Info("Container's snapshots :") diff --git a/src/control/cmd/daos/system.go b/src/control/cmd/daos/system.go index 9d8008173ea..8856c285214 100644 --- a/src/control/cmd/daos/system.go +++ b/src/control/cmd/daos/system.go @@ -56,8 +56,8 @@ func (cmd *systemQueryCmd) Execute(_ []string) error { }) } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(sysInfo, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(sysInfo, nil) } cmd.Infof("connected to DAOS system:") diff --git a/src/control/cmd/daos/util.go b/src/control/cmd/daos/util.go index b5ebb7d90e0..4297fdd32eb 100644 --- a/src/control/cmd/daos/util.go +++ b/src/control/cmd/daos/util.go @@ -253,7 +253,7 @@ type daosCaller interface { type daosCmd struct { cmdutil.NoArgsCmd - jsonOutputCmd + cmdutil.JSONOutputCmd cmdutil.LogCmd } diff --git a/src/control/cmd/daos_agent/attachinfo.go b/src/control/cmd/daos_agent/attachinfo.go index 
50aedd33daa..76e42e53974 100644 --- a/src/control/cmd/daos_agent/attachinfo.go +++ b/src/control/cmd/daos_agent/attachinfo.go @@ -8,12 +8,12 @@ package main import ( "context" - "encoding/json" "fmt" "os" "github.com/pkg/errors" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/txtfmt" ) @@ -21,8 +21,8 @@ import ( type dumpAttachInfoCmd struct { configCmd ctlInvokerCmd + cmdutil.JSONOutputCmd Output string `short:"o" long:"output" default:"stdout" description:"Dump output to this location"` - JSON bool `short:"j" long:"json" description:"Enable JSON output"` } func (cmd *dumpAttachInfoCmd) Execute(_ []string) error { @@ -46,14 +46,8 @@ func (cmd *dumpAttachInfoCmd) Execute(_ []string) error { return errors.Wrap(err, "GetAttachInfo failed") } - if cmd.JSON { - data, err := json.MarshalIndent(resp, "", " ") - if err != nil { - return err - } - - _, err = out.Write(append(data, []byte("\n")...)) - return err + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } system := cmd.cfg.SystemName diff --git a/src/control/cmd/daos_agent/main.go b/src/control/cmd/daos_agent/main.go index 3462a3e9711..8f5c135b4d9 100644 --- a/src/control/cmd/daos_agent/main.go +++ b/src/control/cmd/daos_agent/main.go @@ -9,7 +9,6 @@ package main import ( "encoding/json" "fmt" - "io" "os" "path" @@ -19,6 +18,7 @@ import ( "github.com/daos-stack/daos/src/control/build" "github.com/daos-stack/daos/src/control/common" "github.com/daos-stack/daos/src/control/common/cmdutil" + "github.com/daos-stack/daos/src/control/lib/atm" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/hardware/hwprov" "github.com/daos-stack/daos/src/control/logging" @@ -69,43 +69,23 @@ func (cmd *configCmd) setConfig(cfg *Config) { cmd.cfg = cfg } -type ( - jsonOutputter interface { - enableJsonOutput(bool) - jsonOutputEnabled() bool - outputJSON(io.Writer, 
interface{}) error - } - - jsonOutputCmd struct { - shouldEmitJSON bool - } -) - -func (cmd *jsonOutputCmd) enableJsonOutput(emitJson bool) { - cmd.shouldEmitJSON = emitJson -} - -func (cmd *jsonOutputCmd) jsonOutputEnabled() bool { - return cmd.shouldEmitJSON -} - -func (cmd *jsonOutputCmd) outputJSON(out io.Writer, in interface{}) error { - data, err := json.MarshalIndent(in, "", " ") - if err != nil { - return err - } - - _, err = out.Write(append(data, []byte("\n")...)) - return err -} - func versionString() string { return build.String(build.AgentName) } -type versionCmd struct{} +type versionCmd struct { + cmdutil.JSONOutputCmd +} func (cmd *versionCmd) Execute(_ []string) error { + if cmd.JSONOutputEnabled() { + buf, err := build.MarshalJSON(build.AgentName) + if err != nil { + return err + } + return cmd.OutputJSON(json.RawMessage(buf), nil) + } + _, err := fmt.Println(versionString()) return err } @@ -135,10 +115,10 @@ func (cmd *supportAgentConfigCmd) getSupportConf() string { } func parseOpts(args []string, opts *cliOptions, invoker control.Invoker, log *logging.LeveledLogger) error { + var wroteJSON atm.Bool p := flags.NewParser(opts, flags.Default) p.Options ^= flags.PrintErrors // Don't allow the library to print errors p.SubcommandsOptional = true - p.CommandHandler = func(cmd flags.Commander, args []string) error { if len(args) > 0 { exitWithError(log, errors.Errorf("unknown command %q", args[0])) @@ -152,8 +132,10 @@ func parseOpts(args []string, opts *cliOptions, invoker control.Invoker, log *lo logCmd.SetLog(log) } - if jsonCmd, ok := cmd.(jsonOutputter); ok { - jsonCmd.enableJsonOutput(opts.JSON) + if jsonCmd, ok := cmd.(cmdutil.JSONOutputter); ok && opts.JSON { + jsonCmd.EnableJSONOutput(os.Stdout, &wroteJSON) + // disable output on stdout other than JSON + log.ClearLevel(logging.LogLevelInfo) } if opts.Debug { @@ -257,11 +239,12 @@ func parseOpts(args []string, opts *cliOptions, invoker control.Invoker, log *lo ctlCmd.setInvoker(invoker) } - 
if err := cmd.Execute(args); err != nil { - return err + err = cmd.Execute(args) + if opts.JSON && wroteJSON.IsFalse() { + cmdutil.OutputJSON(os.Stdout, nil, err) } - return nil + return err } _, err := p.Parse() diff --git a/src/control/cmd/daos_agent/network.go b/src/control/cmd/daos_agent/network.go index 2c5b3a55dde..42c3407ca49 100644 --- a/src/control/cmd/daos_agent/network.go +++ b/src/control/cmd/daos_agent/network.go @@ -8,7 +8,6 @@ package main import ( "context" - "os" "strings" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" @@ -21,17 +20,10 @@ import ( type netScanCmd struct { cmdutil.LogCmd - jsonOutputCmd + cmdutil.JSONOutputCmd FabricProvider string `short:"p" long:"provider" description:"Filter device list to those that support the given OFI provider or 'all' for all available (default is all local providers)"` } -func (cmd *netScanCmd) printUnlessJson(fmtStr string, args ...interface{}) { - if cmd.jsonOutputEnabled() { - return - } - cmd.Infof(fmtStr, args...) -} - func (cmd *netScanCmd) Execute(_ []string) error { var prov string if !strings.EqualFold(cmd.FabricProvider, "all") { @@ -51,8 +43,8 @@ func (cmd *netScanCmd) Execute(_ []string) error { return err } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(os.Stdout, hfm) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(hfm, nil) } var bld strings.Builder diff --git a/src/control/cmd/daos_server/main.go b/src/control/cmd/daos_server/main.go index 3a28cce29ec..88c21bf5809 100644 --- a/src/control/cmd/daos_server/main.go +++ b/src/control/cmd/daos_server/main.go @@ -8,6 +8,7 @@ package main import ( "context" + "encoding/json" "fmt" "os" "path" @@ -19,6 +20,7 @@ import ( "github.com/daos-stack/daos/src/control/common" "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/fault" + "github.com/daos-stack/daos/src/control/lib/atm" "github.com/daos-stack/daos/src/control/lib/hardware/hwprov" "github.com/daos-stack/daos/src/control/logging" 
"github.com/daos-stack/daos/src/control/pbin" @@ -35,6 +37,7 @@ type mainOpts struct { // TODO(DAOS-3129): This should be -d, but it conflicts with the start // subcommand's -d flag when we default to running it. Debug bool `short:"b" long:"debug" description:"Enable debug output"` + JSON bool `long:"json" short:"j" description:"enable JSON output"` JSONLog bool `short:"J" long:"json-logging" description:"Enable JSON-formatted log output"` Syslog bool `long:"syslog" description:"Enable logging to syslog"` @@ -54,9 +57,19 @@ type mainOpts struct { preExecTests []execTestFn } -type versionCmd struct{} +type versionCmd struct { + cmdutil.JSONOutputCmd +} func (cmd *versionCmd) Execute(_ []string) error { + if cmd.JSONOutputEnabled() { + buf, err := build.MarshalJSON(build.ControlPlaneName) + if err != nil { + return err + } + return cmd.OutputJSON(json.RawMessage(buf), nil) + } + fmt.Println(build.String(build.ControlPlaneName)) return nil } @@ -71,6 +84,7 @@ func exitWithError(log *logging.LeveledLogger, err error) { } func parseOpts(args []string, opts *mainOpts, log *logging.LeveledLogger) error { + var wroteJSON atm.Bool p := flags.NewParser(opts, flags.HelpFlag|flags.PassDoubleDash) p.SubcommandsOptional = false p.CommandHandler = func(cmd flags.Commander, cmdArgs []string) error { @@ -79,6 +93,12 @@ func parseOpts(args []string, opts *mainOpts, log *logging.LeveledLogger) error return errors.Errorf("unexpected commandline arguments: %v", cmdArgs) } + if jsonCmd, ok := cmd.(cmdutil.JSONOutputter); ok && opts.JSON { + jsonCmd.EnableJSONOutput(os.Stdout, &wroteJSON) + // disable output on stdout other than JSON + log.ClearLevel(logging.LogLevelInfo) + } + switch cmd.(type) { case *versionCmd: // No pre-exec tests or setup needed for these commands; just @@ -144,11 +164,11 @@ func parseOpts(args []string, opts *mainOpts, log *logging.LeveledLogger) error // Parse commandline flags which override options loaded from config. 
_, err := p.ParseArgs(args) - if err != nil { - return err + if opts.JSON && wroteJSON.IsFalse() { + cmdutil.OutputJSON(os.Stdout, nil, err) } - return nil + return err } func main() { diff --git a/src/control/cmd/dmg/auto.go b/src/control/cmd/dmg/auto.go index 98a7d9973ff..ccfe54de575 100644 --- a/src/control/cmd/dmg/auto.go +++ b/src/control/cmd/dmg/auto.go @@ -14,6 +14,7 @@ import ( "gopkg.in/yaml.v2" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/hardware" "github.com/daos-stack/daos/src/control/server/config" @@ -32,7 +33,7 @@ type configGenCmd struct { cfgCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd AccessPoints string `default:"localhost" short:"a" long:"access-points" description:"Comma separated list of access point addresses "` NrEngines int `short:"e" long:"num-engines" description:"Set the number of DAOS Engine sections to be populated in the config file output. 
If unset then the value will be set to the number of NUMA nodes on storage hosts in the DAOS system."` @@ -87,8 +88,8 @@ func (cmd *configGenCmd) confGen(ctx context.Context) (*config.Server, error) { req.HostList = hl // TODO: decide whether we want meaningful JSON output - if cmd.jsonOutputEnabled() { - return nil, cmd.outputJSON(nil, errors.New("JSON output not supported")) + if cmd.JSONOutputEnabled() { + return nil, cmd.OutputJSON(nil, errors.New("JSON output not supported")) } cmd.Debugf("control API ConfGenerateRemote called with req: %+v", req) diff --git a/src/control/cmd/dmg/cont.go b/src/control/cmd/dmg/cont.go index 4e2aba52e2b..5ad087b0676 100644 --- a/src/control/cmd/dmg/cont.go +++ b/src/control/cmd/dmg/cont.go @@ -12,6 +12,7 @@ import ( "github.com/jessevdk/go-flags" "github.com/pkg/errors" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/ui" ) @@ -25,7 +26,7 @@ type ContCmd struct { type ContSetOwnerCmd struct { baseCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd GroupName ui.ACLPrincipalFlag `short:"g" long:"group" description:"New owner-group for the container, format name@domain"` UserName ui.ACLPrincipalFlag `short:"u" long:"user" description:"New owner-user for the container, format name@domain"` ContUUID string `short:"c" long:"cont" required:"1" description:"UUID of the DAOS container"` @@ -54,6 +55,10 @@ func (c *ContSetOwnerCmd) Execute(args []string) error { msg = errors.WithMessage(err, "FAILED").Error() } + if c.JSONOutputEnabled() { + return c.OutputJSON(nil, err) + } + c.Infof("Container-set-owner command %s\n", msg) return err diff --git a/src/control/cmd/dmg/firmware.go b/src/control/cmd/dmg/firmware.go index c7baa1eef66..16748d37b32 100644 --- a/src/control/cmd/dmg/firmware.go +++ b/src/control/cmd/dmg/firmware.go @@ -12,6 +12,7 @@ import ( "strings" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + 
"github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" ) @@ -31,7 +32,7 @@ type firmwareQueryCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd DeviceType string `short:"t" long:"type" choice:"nvme" choice:"scm" choice:"all" default:"all" description:"Type of storage devices to query"` Devices string `short:"d" long:"devices" description:"Comma-separated list of device identifiers to query"` ModelID string `short:"m" long:"model" description:"Model ID to filter results by"` @@ -57,8 +58,8 @@ func (cmd *firmwareQueryCmd) Execute(args []string) error { req.SetHostList(cmd.getHostList()) resp, err := control.FirmwareQuery(ctx, cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -115,7 +116,7 @@ type firmwareUpdateCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd DeviceType string `short:"t" long:"type" choice:"nvme" choice:"scm" required:"1" description:"Type of storage devices to update"` FilePath string `short:"p" long:"path" required:"1" description:"Path to the firmware file accessible from all nodes"` Devices string `short:"d" long:"devices" description:"Comma-separated list of device identifiers to update"` @@ -147,8 +148,8 @@ func (cmd *firmwareUpdateCmd) Execute(args []string) error { req.SetHostList(cmd.getHostList()) resp, err := control.FirmwareUpdate(ctx, cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { diff --git a/src/control/cmd/dmg/main.go b/src/control/cmd/dmg/main.go index 45b9274608d..34573d5cada 100644 --- a/src/control/cmd/dmg/main.go +++ b/src/control/cmd/dmg/main.go @@ -9,7 +9,6 @@ package main import ( "encoding/json" "fmt" - "io" "os" "path" @@ -22,7 +21,6 @@ import ( 
"github.com/daos-stack/daos/src/control/fault" "github.com/daos-stack/daos/src/control/lib/atm" "github.com/daos-stack/daos/src/control/lib/control" - "github.com/daos-stack/daos/src/control/lib/daos" "github.com/daos-stack/daos/src/control/lib/hostlist" "github.com/daos-stack/daos/src/control/lib/ui" "github.com/daos-stack/daos/src/control/logging" @@ -54,19 +52,6 @@ type ( ctlInvokerCmd struct { ctlInvoker control.Invoker } - - jsonOutputter interface { - enableJsonOutput(bool, io.Writer, *atm.Bool) - jsonOutputEnabled() bool - outputJSON(interface{}, error) error - errorJSON(error) error - } - - jsonOutputCmd struct { - wroteJSON *atm.Bool - writer io.Writer - shouldEmitJSON bool - } ) func (cmd *ctlInvokerCmd) setInvoker(c control.Invoker) { @@ -99,65 +84,6 @@ func (cmd *singleHostCmd) setHostList(newList *hostlist.HostSet) { cmd.HostList.Replace(newList) } -func (cmd *jsonOutputCmd) enableJsonOutput(emitJson bool, w io.Writer, wj *atm.Bool) { - cmd.shouldEmitJSON = emitJson - cmd.writer = w - cmd.wroteJSON = wj -} - -func (cmd *jsonOutputCmd) jsonOutputEnabled() bool { - return cmd.shouldEmitJSON -} - -func outputJSON(out io.Writer, in interface{}, cmdErr error) error { - status := 0 - var errStr *string - if cmdErr != nil { - errStr = new(string) - *errStr = cmdErr.Error() - if s, ok := errors.Cause(cmdErr).(daos.Status); ok { - status = int(s) - } else { - status = int(daos.MiscError) - } - } - - data, err := json.MarshalIndent(struct { - Response interface{} `json:"response"` - Error *string `json:"error"` - Status int `json:"status"` - }{in, errStr, status}, "", " ") - if err != nil { - fmt.Fprintf(out, "unable to marshal json: %s\n", err.Error()) - return err - } - - if _, err = out.Write(append(data, []byte("\n")...)); err != nil { - fmt.Fprintf(out, "unable to write json: %s\n", err.Error()) - return err - } - - return cmdErr -} - -func (cmd *jsonOutputCmd) outputJSON(in interface{}, cmdErr error) error { - if cmd.wroteJSON.IsTrue() { - return cmdErr - 
} - cmd.wroteJSON.SetTrue() - return outputJSON(cmd.writer, in, cmdErr) -} - -func errorJSON(err error) error { - return outputJSON(os.Stdout, nil, err) -} - -func (cmd *jsonOutputCmd) errorJSON(err error) error { - return cmd.outputJSON(nil, err) -} - -var _ jsonOutputter = (*jsonOutputCmd)(nil) - type cmdLogger interface { setLog(*logging.LeveledLogger) } @@ -205,9 +131,19 @@ type cliOptions struct { ManPage cmdutil.ManCmd `command:"manpage" hidden:"true"` } -type versionCmd struct{} +type versionCmd struct { + cmdutil.JSONOutputCmd +} func (cmd *versionCmd) Execute(_ []string) error { + if cmd.JSONOutputEnabled() { + buf, err := build.MarshalJSON(build.AdminUtilName) + if err != nil { + return err + } + return cmd.OutputJSON(json.RawMessage(buf), nil) + } + fmt.Println(build.String(build.AdminUtilName)) os.Exit(0) return nil @@ -277,12 +213,10 @@ and access control settings, along with system wide operations.` log.WithJSONOutput() } - if jsonCmd, ok := cmd.(jsonOutputter); ok { - jsonCmd.enableJsonOutput(opts.JSON, os.Stdout, &wroteJSON) - if opts.JSON { - // disable output on stdout other than JSON - log.ClearLevel(logging.LogLevelInfo) - } + if jsonCmd, ok := cmd.(cmdutil.JSONOutputter); ok && opts.JSON { + jsonCmd.EnableJSONOutput(os.Stdout, &wroteJSON) + // disable output on stdout other than JSON + log.ClearLevel(logging.LogLevelInfo) } if logCmd, ok := cmd.(cmdutil.LogSetter); ok { @@ -351,7 +285,7 @@ and access control settings, along with system wide operations.` _, err := p.ParseArgs(args) if opts.JSON && wroteJSON.IsFalse() { - return errorJSON(err) + return cmdutil.OutputJSON(os.Stdout, nil, err) } return err } diff --git a/src/control/cmd/dmg/network.go b/src/control/cmd/dmg/network.go index 219c67e4dd7..e55ddadb635 100644 --- a/src/control/cmd/dmg/network.go +++ b/src/control/cmd/dmg/network.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + "github.com/daos-stack/daos/src/control/common/cmdutil" 
"github.com/daos-stack/daos/src/control/lib/control" ) @@ -26,7 +27,7 @@ type networkScanCmd struct { cfgCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd FabricProvider string `short:"p" long:"provider" description:"Filter device list to those that support the given OFI provider or 'all' for all available (default is the provider specified in daos_server.yml)"` } @@ -42,8 +43,8 @@ func (cmd *networkScanCmd) Execute(_ []string) error { resp, err := control.NetworkScan(ctx, cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { diff --git a/src/control/cmd/dmg/pool.go b/src/control/cmd/dmg/pool.go index f33f2f335db..c229b40256f 100644 --- a/src/control/cmd/dmg/pool.go +++ b/src/control/cmd/dmg/pool.go @@ -19,6 +19,7 @@ import ( "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" "github.com/daos-stack/daos/src/control/common" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/ranklist" "github.com/daos-stack/daos/src/control/lib/ui" @@ -187,7 +188,7 @@ type PoolCreateCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd GroupName ui.ACLPrincipalFlag `short:"g" long:"group" description:"DAOS pool to be owned by given group, format name@domain"` UserName ui.ACLPrincipalFlag `short:"u" long:"user" description:"DAOS pool to be owned by given user, format name@domain"` Properties PoolSetPropsFlag `short:"P" long:"properties" description:"Pool properties to be set"` @@ -309,8 +310,8 @@ func (cmd *PoolCreateCmd) Execute(args []string) error { resp, err := control.PoolCreate(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -355,7 +356,7 @@ type PoolListCmd struct 
{ baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Verbose bool `short:"v" long:"verbose" description:"Add pool UUIDs and service replica lists to display"` NoQuery bool `short:"n" long:"no-query" description:"Disable query of listed pools"` } @@ -379,8 +380,8 @@ func (cmd *PoolListCmd) Execute(_ []string) (errOut error) { return err // control api returned an error, disregard response } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, nil) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, nil) } var out, outErr strings.Builder @@ -406,7 +407,7 @@ type poolCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Args struct { Pool PoolID `positional-arg-name:"" required:"1"` @@ -598,8 +599,8 @@ func (cmd *PoolQueryCmd) Execute(args []string) error { resp, err := control.PoolQuery(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -638,8 +639,8 @@ func (cmd *PoolQueryTargetsCmd) Execute(args []string) error { resp, err := control.PoolQueryTargets(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -703,8 +704,8 @@ func (cmd *PoolSetPropCmd) Execute(_ []string) error { } err := control.PoolSetProp(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(nil, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(nil, err) } if err != nil { @@ -731,8 +732,8 @@ func (cmd *PoolGetPropCmd) Execute(_ []string) error { } resp, err := control.PoolGetProp(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -760,8 +761,8 @@ func (cmd 
*PoolGetACLCmd) Execute(args []string) error { req := &control.PoolGetACLReq{ID: cmd.PoolID().String()} resp, err := control.PoolGetACL(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -830,8 +831,8 @@ func (cmd *PoolOverwriteACLCmd) Execute(args []string) error { } resp, err := control.PoolOverwriteACL(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -878,8 +879,8 @@ func (cmd *PoolUpdateACLCmd) Execute(args []string) error { } resp, err := control.PoolUpdateACL(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -908,8 +909,8 @@ func (cmd *PoolDeleteACLCmd) Execute(args []string) error { } resp, err := control.PoolDeleteACL(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { diff --git a/src/control/cmd/dmg/server.go b/src/control/cmd/dmg/server.go index 463f99877b6..aa4a6e91f22 100644 --- a/src/control/cmd/dmg/server.go +++ b/src/control/cmd/dmg/server.go @@ -13,6 +13,7 @@ import ( "github.com/pkg/errors" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" ) @@ -27,7 +28,7 @@ type serverSetLogMasksCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Masks *string `short:"m" long:"masks" description:"Set log masks for a set of facilities to a given level. The input string should look like PREFIX1=LEVEL1,PREFIX2=LEVEL2,... 
where the syntax is identical to what is expected by 'D_LOG_MASK' environment variable. If the 'PREFIX=' part is omitted, then the level applies to all defined facilities (e.g. a value of 'WARN' sets everything to WARN). If unset then reset engine log masks to use the 'log_mask' value set in the server config file (for each engine) at the time of DAOS system format. Supported levels are FATAL, CRIT, ERR, WARN, NOTE, INFO, DEBUG"` Streams *string `short:"d" long:"streams" description:"Employ finer grained control over debug streams. Mask bits are set as the first argument passed in D_DEBUG(mask, ...) and this input string (DD_MASK) can be set to enable different debug streams. The expected syntax is a comma separated list of stream identifiers and accepted DAOS Debug Streams are md,pl,mgmt,epc,df,rebuild,daos_default and Common Debug Streams (GURT) are any,trace,mem,net,io. If not set, streams will be read from server config file and if set to an empty string then all debug streams will be enabled"` Subsystems *string `short:"s" long:"subsystems" description:"This input string is equivalent to the use of the DD_SUBSYS environment variable and can be set to enable logging for specific subsystems or facilities. The expected syntax is a comma separated list of facility identifiers. 
Accepted DAOS facilities are common,tree,vos,client,server,rdb,pool,container,object,placement,rebuild,tier,mgmt,bio,tests, Common facilities (GURT) are MISC,MEM and CaRT facilities RPC,BULK,CORPC,GRP,LM,HG,ST,IV If not set, subsystems to enable will be read from server config file and if set to an empty string then logging all subsystems will be enabled"` @@ -55,8 +56,8 @@ func (cmd *serverSetLogMasksCmd) Execute(_ []string) (errOut error) { cmd.Debugf("set log masks response: %+v", resp) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } var out, outErr strings.Builder diff --git a/src/control/cmd/dmg/storage.go b/src/control/cmd/dmg/storage.go index 818bc59ae35..fea3160f74c 100644 --- a/src/control/cmd/dmg/storage.go +++ b/src/control/cmd/dmg/storage.go @@ -13,6 +13,7 @@ import ( "github.com/pkg/errors" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" ) @@ -33,7 +34,7 @@ type storageScanCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Verbose bool `short:"v" long:"verbose" description:"List SCM & NVMe device details"` NvmeHealth bool `short:"n" long:"nvme-health" description:"Display NVMe device health statistics"` NvmeMeta bool `short:"m" long:"nvme-meta" description:"Display server meta data held on NVMe storage"` @@ -67,8 +68,8 @@ func (cmd *storageScanCmd) Execute(_ []string) error { cmd.Debugf("storage scan response: %+v", resp.HostStorage) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } var outErr strings.Builder @@ -105,7 +106,7 @@ type storageFormatCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Verbose bool `short:"v" long:"verbose" description:"Show 
results of each SCM & NVMe device format operation"` Force bool `long:"force" description:"Force storage format on a host, stopping any running engines (CAUTION: destructive operation)"` } @@ -124,8 +125,8 @@ func (cmd *storageFormatCmd) Execute(args []string) (err error) { return err } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } return cmd.printFormatResp(resp) @@ -155,7 +156,7 @@ type nvmeRebindCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd PCIAddr string `short:"a" long:"pci-address" required:"1" description:"NVMe SSD PCI address to rebind."` } @@ -177,8 +178,8 @@ func (cmd *nvmeRebindCmd) Execute(args []string) error { return err } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } var outErr strings.Builder @@ -202,7 +203,7 @@ type nvmeAddDeviceCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd PCIAddr string `short:"a" long:"pci-address" required:"1" description:"NVMe SSD PCI address to add."` EngineIndex uint32 `short:"e" long:"engine-index" required:"1" description:"Index of DAOS engine to add NVMe device to."` StorageTierIndex int32 `short:"t" long:"tier-index" default:"-1" description:"Index of storage tier on DAOS engine to add NVMe device to."` @@ -231,8 +232,8 @@ func (cmd *nvmeAddDeviceCmd) Execute(args []string) error { return err } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } var outErr strings.Builder diff --git a/src/control/cmd/dmg/storage_query.go b/src/control/cmd/dmg/storage_query.go index 86e179f0644..4f30b1b5af9 100644 --- a/src/control/cmd/dmg/storage_query.go +++ b/src/control/cmd/dmg/storage_query.go @@ -14,6 +14,7 @@ import ( 
"github.com/daos-stack/daos/src/control/cmd/dmg/pretty" "github.com/daos-stack/daos/src/control/common" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/ranklist" ) @@ -33,15 +34,15 @@ type smdQueryCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd } func (cmd *smdQueryCmd) makeRequest(ctx context.Context, req *control.SmdQueryReq, opts ...pretty.PrintConfigOption) error { req.SetHostList(cmd.getHostList()) resp, err := control.SmdQuery(ctx, cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -127,7 +128,7 @@ type usageQueryCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd } // Execute is run when usageQueryCmd activates. @@ -139,8 +140,8 @@ func (cmd *usageQueryCmd) Execute(_ []string) error { req.SetHostList(cmd.getHostList()) resp, err := control.StorageScan(ctx, cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -165,15 +166,15 @@ type smdManageCmd struct { baseCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd } func (cmd *smdManageCmd) makeRequest(ctx context.Context, req *control.SmdManageReq, opts ...pretty.PrintConfigOption) error { req.SetHostList(cmd.getHostList()) resp, err := control.SmdManage(ctx, cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -206,7 +207,7 @@ type nvmeSetFaultyCmd struct { // Set the SMD device state of the given device to "FAULTY" func (cmd *nvmeSetFaultyCmd) Execute(_ []string) error { cmd.Notice("This command will permanently mark the device as unusable!") - if !cmd.Force 
&& !cmd.jsonOutputEnabled() { + if !cmd.Force && !cmd.JSONOutputEnabled() { if !common.GetConsent(cmd.Logger) { return errors.New("consent not given") } diff --git a/src/control/cmd/dmg/support.go b/src/control/cmd/dmg/support.go index 965290905bb..3e3276d0601 100644 --- a/src/control/cmd/dmg/support.go +++ b/src/control/cmd/dmg/support.go @@ -18,6 +18,7 @@ import ( "github.com/pkg/errors" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/support" ) @@ -33,7 +34,7 @@ type collectLogCmd struct { cfgCmd ctlInvokerCmd hostListCmd - jsonOutputCmd + cmdutil.JSONOutputCmd support.CollectLogSubCmd } @@ -146,7 +147,7 @@ func (cmd *collectLogCmd) Execute(_ []string) error { // set of support collection steps to show in progress bar progress := support.ProgressBar{ Total: len(LogCollection) + len(DmgInfoCollection) + 1, // Extra 1 is for rsync operation. 
- NoDisplay: cmd.jsonOutputEnabled(), + NoDisplay: cmd.JSONOutputEnabled(), } // Add custom log location @@ -219,7 +220,7 @@ func (cmd *collectLogCmd) Execute(_ []string) error { params.Config = cmd.cfgCmd.config.Path params.TargetFolder = cmd.TargetFolder params.ExtraLogsDir = cmd.ExtraLogsDir - params.JsonOutput = cmd.jsonOutputEnabled() + params.JsonOutput = cmd.JSONOutputEnabled() params.Hostlist = strings.Join(cmd.hostlist, " ") for logFunc, logCmdSet := range DmgInfoCollection { for _, logCmd := range logCmdSet { @@ -266,8 +267,8 @@ func (cmd *collectLogCmd) Execute(_ []string) error { fmt.Printf(progress.Display()) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(nil, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(nil, err) } return nil diff --git a/src/control/cmd/dmg/system.go b/src/control/cmd/dmg/system.go index 3a8be279a9f..af19359453c 100644 --- a/src/control/cmd/dmg/system.go +++ b/src/control/cmd/dmg/system.go @@ -16,6 +16,7 @@ import ( "github.com/pkg/errors" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/daos" "github.com/daos-stack/daos/src/control/lib/ranklist" @@ -45,7 +46,7 @@ type leaderQueryCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd DownReplicas bool `short:"N" long:"down-replicas" description:"Show Down Replicas only"` } @@ -66,8 +67,8 @@ func (cmd *leaderQueryCmd) Execute(_ []string) (errOut error) { return err // control api returned an error, disregard response } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if cmd.DownReplicas { @@ -103,7 +104,7 @@ type systemQueryCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd rankListCmd Verbose bool `long:"verbose" short:"v" description:"Display more member 
details"` } @@ -126,8 +127,8 @@ func (cmd *systemQueryCmd) Execute(_ []string) (errOut error) { return err // control api returned an error, disregard response } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } var out, outErr strings.Builder @@ -162,7 +163,7 @@ type systemStopCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd rankListCmd Force bool `long:"force" description:"Force stop DAOS system members"` } @@ -187,8 +188,8 @@ func (cmd *systemStopCmd) Execute(_ []string) (errOut error) { return err // control api returned an error, disregard response } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } var out, outErr strings.Builder @@ -207,7 +208,7 @@ type baseExcludeCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd rankListCmd } @@ -228,8 +229,8 @@ func (cmd *baseExcludeCmd) execute(clear bool) error { return err // control api returned an error, disregard response } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } updated := ranklist.NewRankSet() @@ -266,7 +267,7 @@ type systemStartCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd rankListCmd } @@ -288,8 +289,8 @@ func (cmd *systemStartCmd) Execute(_ []string) (errOut error) { return err // control api returned an error, disregard response } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, resp.Errors()) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, resp.Errors()) } var out, outErr strings.Builder @@ -308,7 +309,7 @@ type systemCleanupCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Args struct { Machine string `positional-arg-name:""` @@ -332,8 
+333,8 @@ func (cmd *systemCleanupCmd) Execute(_ []string) (errOut error) { return err // control api returned an error, disregard response } - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } var out, outErr strings.Builder @@ -356,7 +357,7 @@ type systemSetAttrCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Args struct { Attrs ui.SetPropertiesFlag `positional-arg-name:"system attributes to set (key:val[,key:val...])" required:"1"` @@ -370,8 +371,8 @@ func (cmd *systemSetAttrCmd) Execute(_ []string) error { } err := control.SystemSetAttr(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(nil, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(nil, err) } if err != nil { @@ -387,7 +388,7 @@ type systemGetAttrCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Args struct { Attrs ui.GetPropertiesFlag `positional-arg-name:"system attributes to get (key[,key...])"` @@ -422,8 +423,8 @@ func (cmd *systemGetAttrCmd) Execute(_ []string) error { } resp, err := control.SystemGetAttr(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } if err != nil { @@ -442,7 +443,7 @@ type systemDelAttrCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Args struct { Attrs ui.GetPropertiesFlag `positional-arg-name:"system attributes to delete (key[,key...])" required:"1"` @@ -459,8 +460,8 @@ func (cmd *systemDelAttrCmd) Execute(_ []string) error { } err := control.SystemSetAttr(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(nil, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(nil, err) } if err != nil { @@ -521,7 +522,7 @@ type systemSetPropCmd struct { baseCmd cfgCmd 
ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Args struct { Props systemSetPropsFlag `positional-arg-name:"system properties to set (key:val[,key:val...])" required:"1"` @@ -535,8 +536,8 @@ func (cmd *systemSetPropCmd) Execute(_ []string) error { } err := control.SystemSetProp(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(nil, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(nil, err) } if err != nil { @@ -593,7 +594,7 @@ type systemGetPropCmd struct { baseCmd cfgCmd ctlInvokerCmd - jsonOutputCmd + cmdutil.JSONOutputCmd Args struct { Props systemGetPropsFlag `positional-arg-name:"system properties to get (key[,key...])"` @@ -628,8 +629,8 @@ func (cmd *systemGetPropCmd) Execute(_ []string) error { } resp, err := control.SystemGetProp(context.Background(), cmd.ctlInvoker, req) - if cmd.jsonOutputEnabled() { - return cmd.outputJSON(resp.Properties, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp.Properties, err) } if err != nil { diff --git a/src/control/cmd/dmg/telemetry.go b/src/control/cmd/dmg/telemetry.go index ca6e748f743..9351496adde 100644 --- a/src/control/cmd/dmg/telemetry.go +++ b/src/control/cmd/dmg/telemetry.go @@ -29,6 +29,7 @@ import ( "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" "github.com/daos-stack/daos/src/control/common" + "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" ) @@ -40,7 +41,7 @@ type telemCmd struct { type telemConfigCmd struct { baseCmd cfgCmd - jsonOutputCmd + cmdutil.JSONOutputCmd InstallDir string `long:"install-dir" short:"i" required:"1" description:"Install directory for telemetry binary"` System string `long:"system" short:"s" default:"prometheus" description:"Telemetry system to configure"` } @@ -303,7 +304,7 @@ type metricsCmd struct { // metricsListCmd provides a list of metrics available from the requested DAOS servers. 
type metricsListCmd struct { baseCmd - jsonOutputCmd + cmdutil.JSONOutputCmd singleHostCmd Port uint32 `short:"p" long:"port" default:"9191" description:"Telemetry port on the host"` } @@ -319,7 +320,7 @@ func (cmd *metricsListCmd) Execute(args []string) error { req.Port = cmd.Port req.Host = host - if !cmd.shouldEmitJSON { + if !cmd.JSONOutputEnabled() { cmd.Info(getConnectingMsg(req.Host, req.Port)) } @@ -328,11 +329,11 @@ func (cmd *metricsListCmd) Execute(args []string) error { return err } - if cmd.shouldEmitJSON { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } - err = pretty.PrintMetricsListResp(cmd.writer, resp) + err = pretty.PrintMetricsListResp(os.Stdout, resp) if err != nil { return err } @@ -357,7 +358,7 @@ func getConnectingMsg(host string, port uint32) string { // metricsQueryCmd collects the requested metrics from the requested DAOS servers. type metricsQueryCmd struct { baseCmd - jsonOutputCmd + cmdutil.JSONOutputCmd singleHostCmd Port uint32 `short:"p" long:"port" default:"9191" description:"Telemetry port on the host"` Metrics string `short:"m" long:"metrics" default:"" description:"Comma-separated list of metric names"` @@ -375,7 +376,7 @@ func (cmd *metricsQueryCmd) Execute(args []string) error { req.Host = host req.MetricNames = common.TokenizeCommaSeparatedString(cmd.Metrics) - if !cmd.shouldEmitJSON { + if !cmd.JSONOutputEnabled() { cmd.Info(getConnectingMsg(req.Host, req.Port)) } @@ -384,11 +385,11 @@ func (cmd *metricsQueryCmd) Execute(args []string) error { return err } - if cmd.shouldEmitJSON { - return cmd.outputJSON(resp, err) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) } - err = pretty.PrintMetricsQueryResp(cmd.writer, resp) + err = pretty.PrintMetricsQueryResp(os.Stdout, resp) if err != nil { return err } diff --git a/src/control/common/cmdutil/json.go b/src/control/common/cmdutil/json.go new file mode 100644 index 00000000000..6af30f2c1ff --- /dev/null +++ 
b/src/control/common/cmdutil/json.go @@ -0,0 +1,92 @@ +// +// (C) Copyright 2023 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package cmdutil + +import ( + "encoding/json" + "io" + + "github.com/pkg/errors" + + "github.com/daos-stack/daos/src/control/lib/atm" + "github.com/daos-stack/daos/src/control/lib/daos" +) + +var _ JSONOutputter = (*JSONOutputCmd)(nil) + +type ( + // JSONOutputter is an interface for commands that can output JSON. + JSONOutputter interface { + EnableJSONOutput(io.Writer, *atm.Bool) + JSONOutputEnabled() bool + OutputJSON(interface{}, error) error + } +) + +// OutputJSON writes the given data or error to the given writer as JSON. +func OutputJSON(writer io.Writer, in interface{}, inErr error) error { + status := 0 + var errStr *string + if inErr != nil { + errStr = func() *string { str := inErr.Error(); return &str }() + if s, ok := errors.Cause(inErr).(daos.Status); ok { + status = int(s) + } else { + status = int(daos.MiscError) + } + } + + data, err := json.MarshalIndent(struct { + Response interface{} `json:"response"` + Error *string `json:"error"` + Status int `json:"status"` + }{in, errStr, status}, "", " ") + if err != nil { + return err + } + + if _, err = writer.Write(append(data, []byte("\n")...)); err != nil { + return err + } + + return inErr +} + +// JSONOutputCmd is a struct that implements JSONOutputter and +// can be embedded in a command struct to provide JSON output. +type JSONOutputCmd struct { + writer io.Writer + jsonEnabled atm.Bool + wroteJSON *atm.Bool +} + +// EnableJSONOutput enables JSON output to the given writer. The +// wroteJSON parameter is optional and is used to track whether +// JSON has been written to the writer. 
+func (cmd *JSONOutputCmd) EnableJSONOutput(writer io.Writer, wroteJSON *atm.Bool) { + cmd.wroteJSON = wroteJSON + if cmd.wroteJSON == nil { + cmd.wroteJSON = atm.NewBoolRef(false) + } + cmd.writer = writer + cmd.jsonEnabled.SetTrue() +} + +// JSONOutputEnabled returns true if JSON output is enabled. +func (cmd *JSONOutputCmd) JSONOutputEnabled() bool { + return cmd.jsonEnabled.IsTrue() +} + +// OutputJSON writes the given data or error to the command's writer as JSON. +func (cmd *JSONOutputCmd) OutputJSON(in interface{}, err error) error { + if cmd.JSONOutputEnabled() && cmd.wroteJSON.IsFalse() { + cmd.wroteJSON.SetTrue() + return OutputJSON(cmd.writer, in, err) + } + + return nil +} diff --git a/src/control/lib/atm/bool.go b/src/control/lib/atm/bool.go index eb6e046fef4..b68e5c081fc 100644 --- a/src/control/lib/atm/bool.go +++ b/src/control/lib/atm/bool.go @@ -21,6 +21,13 @@ func NewBool(in bool) Bool { return b } +// NewBoolRef returns a reference to a Bool set to the +// provided starting value. +func NewBoolRef(in bool) *Bool { + b := NewBool(in) + return &b +} + // SetTrue sets the Bool to true. func (b *Bool) SetTrue() { atomic.StoreUint32((*uint32)(b), 1) diff --git a/src/control/lib/control/network.go b/src/control/lib/control/network.go index e9901b21018..30496860b98 100644 --- a/src/control/lib/control/network.go +++ b/src/control/lib/control/network.go @@ -207,8 +207,8 @@ type ( // PrimaryServiceRank provides a rank->uri mapping for a DAOS // Primary Service Rank (PSR). 
PrimaryServiceRank struct { - Rank uint32 - Uri string + Rank uint32 `json:"rank"` + Uri string `json:"uri"` } ClientNetworkHint struct { diff --git a/src/control/lib/hardware/hwprov/topology_cmd.go b/src/control/lib/hardware/hwprov/topology_cmd.go index 10f69944658..cf7b5d85a0d 100644 --- a/src/control/lib/hardware/hwprov/topology_cmd.go +++ b/src/control/lib/hardware/hwprov/topology_cmd.go @@ -8,7 +8,6 @@ package hwprov import ( "context" - "encoding/json" "os" "github.com/pkg/errors" @@ -20,9 +19,9 @@ import ( // DumpTopologyCmd implements a go-flags Commander that dumps // the system topology to stdout or to a file. type DumpTopologyCmd struct { + cmdutil.JSONOutputCmd cmdutil.LogCmd Output string `short:"o" long:"output" default:"stdout" description:"Dump output to this location"` - JSON bool `short:"j" long:"json" description:"Enable JSON output"` } func (cmd *DumpTopologyCmd) Execute(_ []string) error { @@ -42,14 +41,9 @@ func (cmd *DumpTopologyCmd) Execute(_ []string) error { return err } - if !cmd.JSON { - return hardware.PrintTopology(topo, out) + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(topo, err) } - data, err := json.MarshalIndent(topo, "", " ") - if err != nil { - return err - } - _, err = out.Write(append(data, []byte("\n")...)) - return err + return hardware.PrintTopology(topo, out) } diff --git a/src/tests/ftest/control/version.py b/src/tests/ftest/control/version.py index a93f20be0ba..ce74549d03e 100644 --- a/src/tests/ftest/control/version.py +++ b/src/tests/ftest/control/version.py @@ -4,6 +4,7 @@ SPDX-License-Identifier: BSD-2-Clause-Patent ''' import re +import json from apricot import TestWithServers from general_utils import run_pcmd, report_errors @@ -17,6 +18,14 @@ class DAOSVersion(TestWithServers): :avocado: recursive """ + + def __init__(self, *args, **kwargs): + """Initialize a DAOSVersion object.""" + super().__init__(*args, **kwargs) + # Don't waste time starting servers and agents. 
+ self.setup_start_servers = False + self.setup_start_agents = False + def test_version(self): """Verify version number for dmg, daos, daos_server, and daos_agent against RPM. @@ -41,66 +50,23 @@ def test_version(self): self.log.info("RPM version = %s", rpm_version) # Get dmg version. - dmg_cmd = self.get_dmg_command() - output = dmg_cmd.version().stdout.decode("utf-8") - - # Verify that "dmg version" is in the output. - if "dmg version" not in output: - errors.append("dmg version is not in the output! {}".format(output)) - - result = re.findall(r"dmg version ([\d.]+)", output) - if not result: - errors.append("Failed to obtain dmg version! {}".format(output)) - else: - dmg_version = result[0] - self.log.info("dmg version = %s", dmg_version) + dmg_version = self.get_dmg_command().version()["response"]["version"] + self.log.info("dmg version = %s", dmg_version) # Get daos version. - daos_cmd = self.get_daos_command() - output = daos_cmd.version().stdout.decode("utf-8") - - # Verify that "daos version" is in the output. - if "daos version" not in output: - errors.append("daos version is not in the output! {}".format(output)) - - result = re.findall(r"daos version ([\d.]+)", output) - if not result: - errors.append("Failed to obtain daos version! {}".format(output)) - else: - daos_version = result[0] - self.log.info("daos version = %s", daos_version) + daos_version = self.get_daos_command().version()["response"]["version"] + self.log.info("daos version = %s", daos_version) # Get daos_agent version. - daos_agent_cmd = "daos_agent version" + daos_agent_cmd = "daos_agent --json version" output = run_pcmd(hosts=self.hostlist_servers, command=daos_agent_cmd) - stdout = output[0]["stdout"][0] - - # Verify that "DAOS Agent" is in the output. - if "DAOS Agent" not in stdout: - errors.append("DAOS Agent is not in the output! {}".format(stdout)) - - result = re.findall(r"DAOS Agent v([\d.]+)", stdout) - if not result: - errors.append("Failed to obtain daos_agent version! 
{}".format(output)) - else: - daos_agent_version = result[0] - self.log.info("daos_agent version = %s", daos_agent_version) + daos_agent_version = json.loads("".join(output[0]["stdout"]))["response"]["version"] + self.log.info("daos_agent version = %s", daos_agent_version) # Get daos_server version daos_server_cmd = DaosServerCommandRunner(path=self.bin) - output = daos_server_cmd.version() - stdout = output.stdout.decode("utf-8") - - # Verify that "DAOS Control Server" is in the output. - if "DAOS Control Server" not in stdout: - errors.append("DAOS Control Server is not in the output! {}".format(stdout)) - - result = re.findall(r"DAOS Control Server v([\d.]+)", stdout) - if not result: - errors.append("Failed to obtain daos_server version! {}".format(output)) - else: - daos_server_version = result[0] - self.log.info("daos_server version = %s", daos_server_version) + daos_server_version = daos_server_cmd.version()["response"]["version"] + self.log.info("daos_server version = %s", daos_server_version) # Verify the tool versions against the RPM. tool_versions = [ diff --git a/src/tests/ftest/util/daos_utils.py b/src/tests/ftest/util/daos_utils.py index 7bafe2dee0b..c1d9b30d5ea 100644 --- a/src/tests/ftest/util/daos_utils.py +++ b/src/tests/ftest/util/daos_utils.py @@ -830,11 +830,10 @@ def version(self): """Call daos version. Returns: - CmdResult: an avocado CmdResult object containing the dmg command - information, e.g. exit status, stdout, stderr, etc. + dict: JSON output Raises: - CommandFailure: if the dmg storage query command fails. + CommandFailure: if the daos version command fails. """ - return self._get_result(["version"]) + return self._get_json_result(("version",)) diff --git a/src/tests/ftest/util/dmg_utils.py b/src/tests/ftest/util/dmg_utils.py index cfa6f003f8a..8ff6898bdcd 100644 --- a/src/tests/ftest/util/dmg_utils.py +++ b/src/tests/ftest/util/dmg_utils.py @@ -1320,14 +1320,13 @@ def version(self): """Call dmg version. 
Returns: - CmdResult: an avocado CmdResult object containing the dmg command - information, e.g. exit status, stdout, stderr, etc. + dict: the dmg json command output converted to a python dictionary Raises: - CommandFailure: if the dmg storage query command fails. + CommandFailure: if the dmg version command fails. """ - return self._get_result(["version"]) + return self._get_json_result(("version",)) def check_system_query_status(data): diff --git a/src/tests/ftest/util/server_utils_base.py b/src/tests/ftest/util/server_utils_base.py index 0aa205405a1..a5ec128040a 100644 --- a/src/tests/ftest/util/server_utils_base.py +++ b/src/tests/ftest/util/server_utils_base.py @@ -59,6 +59,7 @@ def __init__(self, path="", yaml_cfg=None, timeout=45): # -o, --config-path= Path to agent configuration file self.debug = FormattedParameter("--debug", True) self.json_logs = FormattedParameter("--json-logging", False) + self.json = FormattedParameter("--json", False) self.config = FormattedParameter("--config={}", default_yaml_file) # Additional daos_server command line parameters: # --allow-proxy Allow proxy configuration via environment @@ -930,11 +931,10 @@ def version(self): """Call daos_server version. Returns: - CmdResult: an avocado CmdResult object containing the daos_server command - information, e.g. exit status, stdout, stderr, etc. + dict: JSON output Raises: CommandFailure: if the daos_server version command fails. """ - return self._get_result(["version"]) + return self._get_json_result(("version",)) From e636b7a0f5ed3621ed0334a8a4ad3620d84c89d8 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Tue, 4 Jul 2023 07:09:06 +0100 Subject: [PATCH 16/20] DAOS-13471 control: Fix conflict with go build. (#12574) Two PRs that modified the go json code crossed in CI. 
Signed-off-by: Ashley Pittman # ------------------------ >8 ------------------------ Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- src/control/cmd/daos/filesystem.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/control/cmd/daos/filesystem.go b/src/control/cmd/daos/filesystem.go index 1215ffc72ba..a7cba8d4efe 100644 --- a/src/control/cmd/daos/filesystem.go +++ b/src/control/cmd/daos/filesystem.go @@ -453,7 +453,7 @@ func (cmd *fsDfuseQueryCmd) Execute(_ []string) error { return errors.Wrapf(err, "failed to query %s", cmd.Args.Path) } - if cmd.jsonOutputEnabled() { + if cmd.JSONOutputEnabled() { jsonAttrs := &struct { NumInodes uint64 `json:"inodes"` NumFileHandles uint64 `json:"open_files"` @@ -465,7 +465,7 @@ func (cmd *fsDfuseQueryCmd) Execute(_ []string) error { NumPools: uint64(ap.dfuse_mem.pool_count), NumContainers: uint64(ap.dfuse_mem.container_count), } - return cmd.outputJSON(jsonAttrs, nil) + return cmd.OutputJSON(jsonAttrs, nil) } cmd.Infof("DFuse descriptor usage.") From 8b0a3e3f96a7df732800da8fb3c62e6addf4cf23 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Tue, 4 Jul 2023 09:10:53 +0000 Subject: [PATCH 17/20] Add metrics for testing. 
Test-tag: dfuse Signed-off-by: Ashley Pittman # ------------------------ >8 ------------------------ Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- src/client/dfuse/dfuse.h | 3 +++ src/client/dfuse/dfuse_main.c | 7 +++++++ src/client/dfuse/ops/open.c | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index 75ef013b04c..72a014d43be 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -61,6 +61,9 @@ struct dfuse_info { ATOMIC uint64_t di_fh_count; ATOMIC uint64_t di_pool_count; ATOMIC uint64_t di_container_count; + + ATOMIC uint64_t di_open_count; + ATOMIC uint64_t di_open_preread; }; /* legacy, allow the old name for easier migration */ diff --git a/src/client/dfuse/dfuse_main.c b/src/client/dfuse/dfuse_main.c index 8da5e9356a4..064f6158792 100644 --- a/src/client/dfuse/dfuse_main.c +++ b/src/client/dfuse/dfuse_main.c @@ -398,6 +398,9 @@ main(int argc, char **argv) dfuse_info->di_wb_cache = true; dfuse_info->di_eq_count = 1; + atomic_init(&dfuse_info->di_open_count, 1); + atomic_init(&dfuse_info->di_open_preread, 1); + while (1) { c = getopt_long(argc, argv, "Mm:St:o:fhv", long_options, NULL); @@ -692,6 +695,10 @@ main(int argc, char **argv) DFUSE_TRA_DOWN(dfuse_info); daos_fini(); out_debug: + + DFUSE_TRA_INFO(dfuse_info, "Opens %lx preread %lx", + atomic_load_relaxed(&dfuse_info->di_open_count), + atomic_load_relaxed(&dfuse_info->di_open_preread)); D_FREE(dfuse_info); DFUSE_LOG_INFO("Exiting with status %d", rc); daos_debug_fini(); diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index d366dd086dd..f68f27d4f06 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -95,10 +95,14 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) atomic_fetch_add_relaxed(&ie->ie_open_count, 1); + atomic_fetch_add_relaxed(&dfuse_info->di_open_count, 1); + /* Enable this for files up to the max read 
size. */ if (prefetch && oh->doh_parent_dir && atomic_load_relaxed(&oh->doh_parent_dir->ie_linear_read) && ie->ie_stat.st_size > 0 && ie->ie_stat.st_size <= DFUSE_MAX_READ) { + atomic_fetch_add_relaxed(&dfuse_info->di_open_preread, 1); + D_ALLOC_PTR(oh->doh_readahead); if (oh->doh_readahead) { D_MUTEX_INIT(&oh->doh_readahead->dra_lock, 0); From a8a3d8f67b55808b844eb489dc5b98d53fbd72b7 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Tue, 4 Jul 2023 19:30:43 +0000 Subject: [PATCH 18/20] Fix error path. Test-tag: dfuse Signed-off-by: Ashley Pittman # ------------------------ >8 ------------------------ Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- src/client/dfuse/dfuse_main.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/client/dfuse/dfuse_main.c b/src/client/dfuse/dfuse_main.c index 064f6158792..3b1823fce3f 100644 --- a/src/client/dfuse/dfuse_main.c +++ b/src/client/dfuse/dfuse_main.c @@ -398,8 +398,8 @@ main(int argc, char **argv) dfuse_info->di_wb_cache = true; dfuse_info->di_eq_count = 1; - atomic_init(&dfuse_info->di_open_count, 1); - atomic_init(&dfuse_info->di_open_preread, 1); + atomic_init(&dfuse_info->di_open_count, 0); + atomic_init(&dfuse_info->di_open_preread, 0); while (1) { c = getopt_long(argc, argv, "Mm:St:o:fhv", long_options, NULL); @@ -691,14 +691,11 @@ main(int argc, char **argv) D_ASSERT(atomic_load_relaxed(&dfuse_info->di_pool_count) == 0); D_ASSERT(atomic_load_relaxed(&dfuse_info->di_container_count) == 0); } - - DFUSE_TRA_DOWN(dfuse_info); - daos_fini(); -out_debug: - DFUSE_TRA_INFO(dfuse_info, "Opens %lx preread %lx", atomic_load_relaxed(&dfuse_info->di_open_count), atomic_load_relaxed(&dfuse_info->di_open_preread)); + daos_fini(); +out_debug: D_FREE(dfuse_info); DFUSE_LOG_INFO("Exiting with status %d", rc); daos_debug_fini(); From beae16e22451b93eb5ed9424b11a58b00cf2c283 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 6 Jul 2023 08:21:05 +0000 
Subject: [PATCH 19/20] Back out metrics. Test-tag: dfuse Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/dfuse.h | 3 --- src/client/dfuse/dfuse_main.c | 8 ++------ src/client/dfuse/ops/open.c | 4 ---- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/src/client/dfuse/dfuse.h b/src/client/dfuse/dfuse.h index 72a014d43be..75ef013b04c 100644 --- a/src/client/dfuse/dfuse.h +++ b/src/client/dfuse/dfuse.h @@ -61,9 +61,6 @@ struct dfuse_info { ATOMIC uint64_t di_fh_count; ATOMIC uint64_t di_pool_count; ATOMIC uint64_t di_container_count; - - ATOMIC uint64_t di_open_count; - ATOMIC uint64_t di_open_preread; }; /* legacy, allow the old name for easier migration */ diff --git a/src/client/dfuse/dfuse_main.c b/src/client/dfuse/dfuse_main.c index 3b1823fce3f..8da5e9356a4 100644 --- a/src/client/dfuse/dfuse_main.c +++ b/src/client/dfuse/dfuse_main.c @@ -398,9 +398,6 @@ main(int argc, char **argv) dfuse_info->di_wb_cache = true; dfuse_info->di_eq_count = 1; - atomic_init(&dfuse_info->di_open_count, 0); - atomic_init(&dfuse_info->di_open_preread, 0); - while (1) { c = getopt_long(argc, argv, "Mm:St:o:fhv", long_options, NULL); @@ -691,9 +688,8 @@ main(int argc, char **argv) D_ASSERT(atomic_load_relaxed(&dfuse_info->di_pool_count) == 0); D_ASSERT(atomic_load_relaxed(&dfuse_info->di_container_count) == 0); } - DFUSE_TRA_INFO(dfuse_info, "Opens %lx preread %lx", - atomic_load_relaxed(&dfuse_info->di_open_count), - atomic_load_relaxed(&dfuse_info->di_open_preread)); + + DFUSE_TRA_DOWN(dfuse_info); daos_fini(); out_debug: D_FREE(dfuse_info); diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index f68f27d4f06..d366dd086dd 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -95,14 +95,10 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) atomic_fetch_add_relaxed(&ie->ie_open_count, 1); - 
atomic_fetch_add_relaxed(&dfuse_info->di_open_count, 1); - /* Enable this for files up to the max read size. */ if (prefetch && oh->doh_parent_dir && atomic_load_relaxed(&oh->doh_parent_dir->ie_linear_read) && ie->ie_stat.st_size > 0 && ie->ie_stat.st_size <= DFUSE_MAX_READ) { - atomic_fetch_add_relaxed(&dfuse_info->di_open_preread, 1); - D_ALLOC_PTR(oh->doh_readahead); if (oh->doh_readahead) { D_MUTEX_INIT(&oh->doh_readahead->dra_lock, 0); From 0f1fc21001646cb6164d0bbd6df04b2867b9b8b9 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 13 Jul 2023 11:47:55 +0000 Subject: [PATCH 20/20] Fix merge. Test-tag: dfuse Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/dfuse/ops/open.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/src/client/dfuse/ops/open.c b/src/client/dfuse/ops/open.c index efdfa1373eb..d6d8f8667e7 100644 --- a/src/client/dfuse/ops/open.c +++ b/src/client/dfuse/ops/open.c @@ -10,29 +10,18 @@ void dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { -<<<<<<< HEAD struct dfuse_info *dfuse_info = fuse_req_userdata(req); struct dfuse_inode_entry *ie; - d_list_t *rlink; struct dfuse_obj_hdl *oh; struct fuse_file_info fi_out = {0}; int rc; bool prefetch = false; -======= - struct dfuse_projection_info *fs_handle = fuse_req_userdata(req); - struct dfuse_inode_entry *ie; - d_list_t *rlink; - struct dfuse_obj_hdl *oh = NULL; - struct fuse_file_info fi_out = {0}; - int rc; ->>>>>>> master - - rlink = d_hash_rec_find(&dfuse_info->dpi_iet, &ino, sizeof(ino)); - if (!rlink) { + + ie = dfuse_inode_lookup(dfuse_info, ino); + if (!ie) { DFUSE_REPLY_ERR_RAW(dfuse_info, req, ENOENT); return; } - ie = container_of(rlink, struct dfuse_inode_entry, ie_htl); D_ALLOC_PTR(oh); if (!oh) @@ -115,7 +104,7 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) } } - d_hash_rec_decref(&dfuse_info->dpi_iet, rlink); + dfuse_inode_decref(dfuse_info, ie); DFUSE_REPLY_OPEN(oh, 
req, &fi_out); if (oh->doh_readahead) @@ -123,7 +112,7 @@ dfuse_cb_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) return; err: - d_hash_rec_decref(&dfuse_info->dpi_iet, rlink); + dfuse_inode_decref(dfuse_info, ie); dfuse_oh_free(dfuse_info, oh); DFUSE_REPLY_ERR_RAW(ie, req, rc); }