Skip to content

Commit

Permalink
Merge branch 'master' into amd/pre-read
Browse files Browse the repository at this point in the history
Required-githooks: true
  • Loading branch information
ashleypittman committed Oct 5, 2023
2 parents 661c033 + 7229422 commit 4d3f28e
Show file tree
Hide file tree
Showing 27 changed files with 859 additions and 506 deletions.
25 changes: 25 additions & 0 deletions docs/admin/administration.md
Original file line number Diff line number Diff line change
Expand Up @@ -960,3 +960,28 @@ DAOS v2.2 client connections to pools which were created by DAOS v2.4
will be rejected. A DAOS v2.4 client should work with both DAOS v2.4 and DAOS v2.2
servers. To upgrade all pools to the latest format after a software upgrade, run
`dmg pool upgrade <pool>`
### Interoperability Matrix
The following table is intended to visually depict the interoperability
policies for all major components in a DAOS system.
||Server<br>(daos_server)|Engine<br>(daos_engine)|Agent<br>(daos_agent)|Client<br>(libdaos)|Admin<br>(dmg)|
|:---|:---:|:---:|:---:|:---:|:---:|
|Server|x.y.z|x.y.z|x.(y±1)|n/a|x.y|
|Engine|x.y.z|x.y.z|n/a|x.(y±1)|n/a|
|Agent|x.(y±1)|n/a|n/a|x.y.z|n/a|
|Client|n/a|x.(y±1)|x.y.z|n/a|n/a|
|Admin|x.y|n/a|n/a|n/a|n/a|
Key:
* x.y.z: Major.Minor.Patch must be equal
* x.y: Major.Minor must be equal
* x.(y±1): Major versions must be equal; Minor versions must be equal or differ by exactly one release (±1)
* n/a: Components do not communicate
Examples:
* daos_server 2.4.0 is only compatible with daos_engine 2.4.0
* daos_agent 2.6.0 is compatible with daos_server 2.4.0 (2.5 is a development version, so 2.4 and 2.6 count as adjacent minor releases)
* dmg 2.4.1 is compatible with daos_server 2.4.0
91 changes: 0 additions & 91 deletions src/client/dfs/dfs_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -131,97 +131,6 @@ dfs_relink_root(daos_handle_t coh);
/**
 * stat-like query of a DFS object, filling \a stbuf.
 * NOTE(review): \a ev appears to be a DAOS completion event for asynchronous
 * execution — presumably pass NULL for synchronous operation; confirm against
 * the libdaos event API documentation.
 *
 * \param[in]	dfs	Pointer to the mounted file system.
 * \param[in]	obj	DFS object to query.
 * \param[out]	stbuf	Returned struct stat for the object.
 * \param[in]	ev	Completion event (see note above).
 *
 * \return		Presumably 0 on success / errno code on failure, matching
 *			the other DFS calls in this header — confirm.
 */
int
dfs_ostatx(dfs_t *dfs, dfs_obj_t *obj, struct stat *stbuf, daos_event_t *ev);

/** Internal pipeline readdir functionality */

/** DFS pipeline object (opaque handle created by dfs_pipeline_create()). */
typedef struct dfs_pipeline dfs_pipeline_t;

/**
 * Filter condition flags for dfs_pipeline_create(). OR together to apply
 * multiple conditions from the dfs_predicate_t.
 * NOTE(review): bit 0 is unused — values start at (1 << 1); confirm intentional.
 */
enum {
DFS_FILTER_NAME = (1 << 1), /**< match entry names against the dp_name regex */
DFS_FILTER_NEWER = (1 << 2), /**< match entries newer than the dp_newer timestamp */
DFS_FILTER_INCLUDE_DIRS = (1 << 3), /**< also include directory entries in the results */
};

/**
 * Predicate condition values for the filter. Each field is consulted only when
 * the corresponding DFS_FILTER_* flag is passed to dfs_pipeline_create().
 */
typedef struct {
char dp_name[DFS_MAX_NAME]; /**< name condition for entry - regex (DFS_FILTER_NAME) */
time_t dp_newer; /**< timestamp for newer condition (DFS_FILTER_NEWER) */
size_t dp_size; /**< size of files - not supported for now */
} dfs_predicate_t;

/**
 * Same as dfs_get_size() but using the OID of the file instead of the open handle. Note that the
 * chunk_size of the file must also be passed if the file was created with a chunk size other than
 * the default (i.e. a non-zero chunk size was given to dfs_open()); otherwise pass 0 for
 * \a chunk_size.
 *
 * \param[in]	dfs		Pointer to the mounted file system.
 * \param[in]	oid		Object ID of the file.
 * \param[in]	chunk_size	Chunk size of the file (pass 0 if it was created with the default).
 * \param[out]	size		Returned size of the file.
 *
 * \return		0 on success, errno code on failure.
 */
int
dfs_get_size_by_oid(dfs_t *dfs, daos_obj_id_t oid, daos_size_t chunk_size, daos_size_t *size);

/**
 * Create a pipeline object to be used during readdir with filter (see
 * dfs_readdir_with_filter()). Should be destroyed with dfs_pipeline_destroy().
 *
 * \param[in]	dfs	Pointer to the mounted file system.
 * \param[in]	pred	Predicate condition values (name/regex, newer timestamp, etc.).
 * \param[in]	flags	Pipeline flags: OR of DFS_FILTER_* conditions to apply.
 * \param[out]	dpipe	Pipeline object created.
 *
 * \return		0 on success, errno code on failure.
 */
int
dfs_pipeline_create(dfs_t *dfs, dfs_predicate_t pred, uint64_t flags, dfs_pipeline_t **dpipe);

/**
 * Destroy a pipeline object created with dfs_pipeline_create().
 *
 * \param[in]	dpipe	Pipeline object to destroy.
 *
 * \return		0 on success, errno code on failure.
 */
int
dfs_pipeline_destroy(dfs_pipeline_t *dpipe);

/**
 * Same as dfs_readdir() but this additionally applies a filter created with dfs_pipeline_create()
 * on the entries that are enumerated. This function also optionally returns the object ID and/or
 * chunk size of each dirent if requested through a pre-allocated input array.
 *
 * \param[in]	dfs	Pointer to the mounted file system.
 * \param[in]	obj	Opened directory object.
 * \param[in]	dpipe	DFS pipeline filter.
 * \param[in,out]
 *		anchor	Hash anchor for the next call, it should be set to
 *			zeroes for the first call, it should not be changed
 *			by caller between calls.
 * \param[in,out]
 *		nr	[in]: number of dirents allocated in \a dirs.
 *			[out]: number of returned dirents.
 * \param[in,out]
 *		dirs	[in]: preallocated array of dirents.
 *			[out]: dirents returned with d_name filled only.
 * \param[in,out]
 *		oids	[in]: optional preallocated array of object IDs.
 *			[out]: object ID associated with each dirent that was read.
 * \param[in,out]
 *		csizes	[in]: optional preallocated array of sizes.
 *			[out]: chunk size associated with each dirent that was read.
 * \param[out]	nr_scanned
 *			Total number of entries scanned by readdir before returning.
 *
 * \return		0 on success, errno code on failure.
 */
int
dfs_readdir_with_filter(dfs_t *dfs, dfs_obj_t *obj, dfs_pipeline_t *dpipe, daos_anchor_t *anchor,
uint32_t *nr, struct dirent *dirs, daos_obj_id_t *oids, daos_size_t *csizes,
uint64_t *nr_scanned);

#if defined(__cplusplus)
}
#endif
Expand Down
12 changes: 9 additions & 3 deletions src/control/cmd/daos/filesystem.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,10 @@ type fsCmd struct {
// fsCopyCmd holds the go-flags options for the "filesystem copy" subcommand.
// The scraped diff left BOTH the pre- and post-commit field declarations in
// place, which is invalid Go (duplicate Source/Dest/Preserve fields); only the
// merged post-commit set is kept here, gofmt-aligned.
type fsCopyCmd struct {
	daosCmd

	// Source and Dest are mandatory copy endpoints; Preserve optionally names
	// a container-properties file (requires HDF5); IgnoreUnsup suppresses
	// errors for filesystem features DFS cannot represent.
	Source      string `long:"src" short:"s" description:"copy source" required:"1"`
	Dest        string `long:"dst" short:"d" description:"copy destination" required:"1"`
	Preserve    string `long:"preserve-props" short:"m" description:"preserve container properties, requires HDF5 library" required:"0"`
	IgnoreUnsup bool   `long:"ignore-unsupported" description:"ignore unsupported filesystem features when copying to DFS" required:"0"`
}

func (cmd *fsCopyCmd) Execute(_ []string) error {
Expand All @@ -64,6 +65,7 @@ func (cmd *fsCopyCmd) Execute(_ []string) error {
ap.preserve_props = C.CString(cmd.Preserve)
defer freeString(ap.preserve_props)
}
ap.ignore_unsup = C.bool(cmd.IgnoreUnsup)

ap.fs_op = C.FS_COPY
rc := C.fs_copy_hdlr(ap)
Expand Down Expand Up @@ -107,6 +109,10 @@ func (cmd *fsCopyCmd) Execute(_ []string) error {
cmd.Infof(" Files: %d", ap.fs_copy_stats.num_files)
cmd.Infof(" Links: %d", ap.fs_copy_stats.num_links)

if ap.fs_copy_stats.num_chmod_enotsup > 0 {
return errors.New(fmt.Sprintf("Copy completed successfully, but %d files had unsupported mode bits that could not be applied. Run with --ignore-unsupported to suppress this warning.", ap.fs_copy_stats.num_chmod_enotsup))
}

return nil
}

Expand Down
2 changes: 1 addition & 1 deletion src/control/common/proto/logging.go
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ func Debug(msg proto.Message) string {
fmt.Fprintf(&bld, " %s:%s", p.Label, p.State)
}
case *mgmtpb.JoinResp:
fmt.Fprintf(&bld, "%T rank:%d (state:%s) map:%d", m, m.Rank, m.State, m.MapVersion)
fmt.Fprintf(&bld, "%T rank:%d (state:%s, local:%t) map:%d", m, m.Rank, m.State, m.LocalJoin, m.MapVersion)
case *mgmtpb.GetAttachInfoResp:
msRanks := ranklist.RankSetFromRanks(ranklist.RanksFromUint32(m.MsRanks))
uriRanks := ranklist.NewRankSet()
Expand Down
Loading

0 comments on commit 4d3f28e

Please sign in to comment.