diff --git a/pom.xml b/pom.xml index 061b4df..a20e950 100644 --- a/pom.xml +++ b/pom.xml @@ -1,154 +1,307 @@ - + + + 4.0.0 - org.apache.hadoop.fs.nfsv3 - hadoop-connector-nfsv3 + org.apache.hadoop + hadoop-nfs-connector + 1.0.4 + Apache Hadoop NFS Connector + + This module contains code to support integration with a NFSv3 storage server. + Currently this consists of a filesystem client to read and write data from + a NFSv3 storage server. It supports AUTH_NONE and AUTH_UNIX authentication. + jar - 1.0 - hadoop-nfsv3 - https://github.com/NetApp/NetApp-Hadoop-NFS-Connector + + UTF-8 + true + + + - build-profile + tests-off - true + + src/test/resources/nfs-test-options.xml + - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.17 - - -Xmx1024m -XX:MaxPermSize=256m - - **/*NFSFileSystemTest.java - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - ${project.build.directory} - - - - - org.codehaus.mojo - cobertura-maven-plugin - 2.5.1 - - - html - xml - - - - - package - - cobertura - - - - - - + + true + - test-profile + tests-on - - NFSTestServer - enable - + + src/test/resources/nfs-test-options.xml + - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.17 - - -Xmx1024m -XX:MaxPermSize=256m - - - nfs_server - ${nfs_server} - path - ${path} - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - ${project.build.directory} - - - - - org.codehaus.mojo - cobertura-maven-plugin - 2.5.1 - - - html - xml - - - - - package - - cobertura - - - - - - + + false + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.2 + + 1.7 + 1.7 + + + + org.codehaus.mojo + findbugs-maven-plugin + + true + true + ${basedir}/dev-support/findbugs-exclude.xml + + Max + + + + org.apache.maven.plugins + maven-project-info-reports-plugin + + + false + false + + + + org.apache.maven.plugins + maven-surefire-plugin + + 3600 + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + src/config/checkstyle.xml + + + + + + + org.apache.hadoop - hadoop-common - 2.4.0 + hadoop-annotations + 2.6.0 + provided + + + org.apache.hadoop + hadoop-auth + 2.6.0 + provided org.apache.hadoop hadoop-nfs - 2.4.0 + 2.6.0 + provided org.apache.hadoop - hadoop-common - 2.4.0 - tests + hadoop-hdfs-nfs + 2.6.0 + test io.netty - netty-all - 4.0.24.Final + netty + 3.6.2.Final + compile + + + org.apache.hadoop + hadoop-hdfs + 2.6.0 + compile + + + org.apache.hadoop + hadoop-hdfs + 2.6.0 + test + test-jar + + + org.apache.hadoop + hadoop-common + 2.6.0 + provided + + + org.apache.hadoop + hadoop-common + 2.6.0 + test + test-jar + + + org.apache.zookeeper + zookeeper + 3.4.6 + test-jar + test + + + com.google.guava + guava + 11.0.2 + compile + + + org.mortbay.jetty + jetty + 6.1.26 + compile + + + org.mortbay.jetty + jetty-util + 6.1.26 + compile + + + com.sun.jersey + jersey-core + 1.9 + compile + + + com.sun.jersey + jersey-server + 1.9 + compile commons-cli commons-cli 1.2 + compile + + + commons-codec + commons-codec + 1.4 + compile + + + commons-io + commons-io + 2.4 + compile + + + commons-lang + commons-lang + 2.6 + compile + + + commons-logging + commons-logging + 1.1.3 + compile + + + commons-daemon + commons-daemon + 1.0.13 + compile + + + log4j + log4j + 1.2.17 + compile + + + com.google.protobuf + protobuf-java + 2.5.0 + compile + + + javax.servlet + servlet-api + 2.5 + compile junit junit - 4.0 + 4.11 + test + + + org.mockito + mockito-all + 1.8.5 test + + org.slf4j + slf4j-log4j12 + 1.7.5 + provided + + + org.codehaus.jackson + jackson-core-asl + 1.9.3 + compile + + + 
org.codehaus.jackson + jackson-mapper-asl + 1.9.3 + compile + + + xmlenc + xmlenc + 0.52 + compile + + +
diff --git a/src/config/checkstyle.xml b/src/config/checkstyle.xml new file mode 100644 index 0000000..9df4f78 --- /dev/null +++ b/src/config/checkstyle.xml @@ -0,0 +1,187 @@ + [187 lines of Checkstyle rule definitions; the XML markup was not preserved in this extract]
diff --git a/src/main/java/org/apache/hadoop/fs/nfs/.DS_Store b/src/main/java/org/apache/hadoop/fs/nfs/.DS_Store deleted file mode 100644 index 26286d5..0000000 Binary files a/src/main/java/org/apache/hadoop/fs/nfs/.DS_Store and /dev/null differ
diff --git a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3AbstractFilesystem.java b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3AbstractFilesystem.java index e7468ea..418d921 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3AbstractFilesystem.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3AbstractFilesystem.java @@ -41,7 +41,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; @@ -88,7 +87,7 @@ public void checkScheme(URI uri, String supportedScheme) { @Override public int getUriDefaultPort() { - return Nfs3Constant.NFS3_SERVER_PORT_DEFAULT; + return NFSv3FileSystem.DEFAULT_NFS_PORT; } @Override
diff --git a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3DelegatedFilesystem.java b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3DelegatedFilesystem.java index d9520ea..4aa0af6 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3DelegatedFilesystem.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3DelegatedFilesystem.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DelegateToFileSystem; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant; public class NFSv3DelegatedFilesystem extends DelegateToFileSystem { @@ -30,6 +29,6 @@ public class NFSv3DelegatedFilesystem extends DelegateToFileSystem { @Override public int getUriDefaultPort() { - return Nfs3Constant.NFS3_SERVER_PORT_DEFAULT; + return NFSv3FileSystem.DEFAULT_NFS_PORT; } }
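Both filesystem shims above now take their default port from the new NFSv3FileSystem.DEFAULT_NFS_PORT constant (2049) instead of Nfs3Constant, consistent with the revised initialize() further down dropping the old portmap lookup. As a minimal sketch of how a client wires the connector into both Hadoop client APIs: the host nfsserver is a placeholder, and the implementation-class keys follow Hadoop's standard fs.<scheme>.impl and fs.AbstractFileSystem.<scheme>.impl conventions rather than anything defined in this patch.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileSystem;

    public class NfsConnectorWiring {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // FileSystem API, used by most MapReduce-era client code
            conf.set("fs.nfs.impl", "org.apache.hadoop.fs.nfs.NFSv3FileSystem");
            // FileContext API resolves through the AbstractFileSystem shims above
            conf.set("fs.AbstractFileSystem.nfs.impl",
                    "org.apache.hadoop.fs.nfs.NFSv3AbstractFilesystem");

            // No port in the URI, so getUriDefaultPort() supplies 2049
            FileSystem fs = FileSystem.get(URI.create("nfs://nfsserver/"), conf);
            FileContext fc = FileContext.getFileContext(URI.create("nfs://nfsserver/"), conf);
            System.out.println(fs.getUri());
            System.out.println(fc.getDefaultFileSystem().getUri());
        }
    }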
diff --git a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystem.java b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystem.java index bc3c13a..0d7d95f 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystem.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystem.java @@ -1,17 +1,18 @@ /** * Copyright 2014 NetApp Inc. All Rights Reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ - package org.apache.hadoop.fs.nfs; import java.io.FileNotFoundException; @@ -19,6 +20,7 @@ import java.net.URI; import java.util.ArrayList; import java.util.EnumSet; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Set; @@ -34,8 +36,10 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.nfs.NfsFileType; @@ -44,7 +48,6 @@ import org.apache.hadoop.nfs.nfs3.Nfs3DirList; import org.apache.hadoop.nfs.nfs3.Nfs3DirList.Nfs3DirEntry; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; -import org.apache.hadoop.nfs.nfs3.Nfs3Info; import org.apache.hadoop.nfs.nfs3.Nfs3SetAttr; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.SetAttr3; @@ -56,11 +59,18 @@ import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; -import org.apache.hadoop.portmap.PortmapMapping; import org.apache.hadoop.util.Progressable; -import org.apache.hadoop.fs.nfs.portmap.PortmapClient; import org.apache.hadoop.fs.nfs.stream.NFSBufferedInputStream; import org.apache.hadoop.fs.nfs.stream.NFSBufferedOutputStream; +import org.apache.hadoop.fs.nfs.topology.Endpoint; +import org.apache.hadoop.fs.nfs.topology.Mapping; +import org.apache.hadoop.fs.nfs.topology.Namespace; +import org.apache.hadoop.fs.nfs.topology.NamespaceOptions; +import org.apache.hadoop.fs.nfs.topology.SimpleTopologyRouter; +import org.apache.hadoop.fs.nfs.topology.TopologyRouter; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.CredentialsSys; /** *

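The new imports are the visible edge of this file's biggest change: the single NFSv3FileSystemStore field gives way to a Namespace of NFS endpoints plus a TopologyRouter that selects the store serving each path. The topology package itself is not part of this diff; reconstructed purely from the call sites in the hunk below, its contract looks roughly like the following sketch (the real interface may differ):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    // Namespace and NFSv3FileSystemStore are the connector's own types.
    public interface TopologyRouter {
        // Called once from NFSv3FileSystem.initialize() with the parsed namespace
        void initialize(NFSv3FileSystem fs, Namespace space, Configuration conf)
                throws IOException;

        // Maps a path to the endpoint (junction) responsible for it; create(),
        // rename() and mkdirs() below refuse to operate across two junctions
        NFSv3FileSystemStore getStore(Path path) throws IOException;

        // Every store opened so far; close() shuts each of them down
        List<NFSv3FileSystemStore> getAllStores() throws IOException;
    }

SimpleTopologyRouter is the only implementation wired in, and the TODO in initialize() below notes that the router class is meant to become configurable.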
@@ -71,855 +81,817 @@ @InterfaceStability.Stable public class NFSv3FileSystem extends FileSystem { - private Configuration configuration; - private Path workingDir; - private URI uri; - private String scheme; - private String authority; - private String hostname; - private int port; - - private NFSv3FileSystemStore store; - - private HandleCache handleCache; - public static final int FILE_HANDLE_CACHE_SIZE = 256; - - // Number of connections - public static final int DEFAULT_NUM_CONNECTIONS = 4; - public static final int MAX_NUM_CONNECTIONS = 32; - public static final int MIN_NUM_CONNECTIONS = 1; - private int numConnections; - - // Read/write block size in bytes - public static final int DEFAULT_READ_BLOCK_SIZE_BITS = 20; - public static final int DEFAULT_WRITE_BLOCK_SIZE_BITS = 20; - private int readBlockSizeBits; - private int writeBlockSizeBits; - - // Default file split size - public static final int DEFAULT_NFS_SPLIT_SIZE_BITS = 28; - private int splitSizeBits; - - // Default UID and GID when creating a new file/directory - public static final int NFS_UID_DEFAULT = 0; - public static final int NFS_GID_DEFAULT = 0; - public static final String NFS_USER_NAME_DEFAULT = "root"; - public static final String NFS_GROUP_NAME_DEFAULT = "root"; - public static final String NFS_URI_SCHEME = "nfs"; - - // Actual UID, GID, user name and group name to use when creating a new file/directory. Values - // from the configuration file will overwrite default values defined above. - private int NFS_UID; - private int NFS_GID; - private String NFS_USER_NAME; - private String NFS_GROUP_NAME; - - public final static Log LOG = LogFactory.getLog(NFSv3FileSystem.class); - private Nfs3Info fsInfo; - - private final static String UNIX_DOT_DIR = "."; - private final static String UNIX_DOT_DOT_DIR = ".."; - - public long getNfsBlockSize() { - return (1L << readBlockSizeBits); - } - - public long getSplitSize() { - return (1L << splitSizeBits); - } - - public NFSv3FileSystem() { - // set store in initialize() - } - - @Override - public String getScheme() { - return NFS_URI_SCHEME; - } - - @Override - public URI getUri() { - LOG.debug("START/END getUri(): uri=" + uri); - return uri; - } - - @Override - public void close() throws IOException { - store.shutdown(); - } - - @Override - public void initialize(URI uri, Configuration conf) throws IOException { - - super.initialize(uri, conf); - - // Save configuration - this.configuration = conf; - - // Get scheme - scheme = uri.getScheme(); - if (scheme == null || scheme.equals(NFS_URI_SCHEME) == false) { - LOG.error("NFS URI scheme is wrong! It was " + scheme - + " but it should be " + NFS_URI_SCHEME); - throw new IOException("Scheme in URI is wrong! It was " + scheme); - } - // Get authority - authority = uri.getAuthority(); - if (authority == null) { - LOG.error("NFS URI authority is wrong! It was " + authority); - throw new IOException("NFS URI authority is wrong! It was " + authority); - } + private Configuration configuration; + private Path workingDir; + private URI uri; + private Namespace space; - // Get NFS server name from the URI or config file - // if URI does not specify hostname, look for the "fs.nfs.server" property - // in the configuration - hostname = uri.getHost(); - if (hostname == null) { - hostname = conf.get("fs.nfs.server", null); - } - if (hostname == null) { - LOG.error("NFS URI hostname is wrong! It was " + hostname); - throw new IOException("NFS URI hostname is wrong! 
It was " + hostname); + private HandleCache handleCache; + + private TopologyRouter router; + + // Actual UID, GID, user name and group name to use when creating a new file/directory. Values + // from the configuration file will overwrite default values defined above. + private int NFS_UID; + private int NFS_GID; + private String NFS_USER_NAME; + private String NFS_GROUP_NAME; + + public static final String NFS_URI_SCHEME = "nfs"; + public static final int FILE_HANDLE_CACHE_SIZE = 1048576; + public static final int DEFAULT_NFS_PORT = 2049; + public static final int DEFAULT_READ_BLOCK_SIZE_BITS = 20; + public static final int DEFAULT_WRITE_BLOCK_SIZE_BITS = 20; + + // Default file split size + public static final int DEFAULT_NFS_SPLIT_SIZE_BITS = 28; + + // Default UID and GID when creating a new file/directory + public static final int NFS_UID_DEFAULT = 0; + public static final int NFS_GID_DEFAULT = 0; + public static final String NFS_USER_NAME_DEFAULT = "root"; + public static final String NFS_GROUP_NAME_DEFAULT = "root"; + + public static final String UNIX_DOT_DIR = "."; + public static final String UNIX_DOT_DOT_DIR = ".."; + + public final static Log LOG = LogFactory.getLog(NFSv3FileSystem.class); + + public NFSv3FileSystem() { + // set store in initialize() } - - // Set the "fs.nfs.server" in the conf to be the final one - conf.set("fs.nfs.server", hostname); - this.uri = URI.create(scheme + "://" + authority); - // Get NFS host port information from portmap protocol. - // This is the port we should use. - int port = getNFSPortFromPortMap(); - if (port <= 0) { - throw new IOException("NFS program is not registered!"); + public NFSv3FileSystem(URI uri, Configuration conf) throws IOException { + initialize(uri, conf); } - - // Verify the port from the URI. - int portInUri = uri.getPort(); - if (portInUri != -1 && portInUri != port) { - LOG.warn("Port specified in URI is different from port gotten from portmap: " - + uri.getPort() + "!=" + port); - } - if (port != Nfs3Constant.NFS3_SERVER_PORT_DEFAULT) { - LOG.warn("Port gotten from portmap is different from default nfs port: " - + port + "!=" + Nfs3Constant.NFS3_SERVER_PORT_DEFAULT); - } - conf.setInt(Nfs3Constant.NFS3_SERVER_PORT, port); - - setConf(conf); - initializeInternal(this.uri, conf); - - this.handleCache = new HandleCache(FILE_HANDLE_CACHE_SIZE); - workingDir = getHomeDirectory(); - } - - private void initializeInternal(URI uri, Configuration conf) - throws IOException { - - // Get configuration parameters for read/write block size - readBlockSizeBits = - conf.getInt("fs." + uri.getScheme() + ".readblockbits", - DEFAULT_READ_BLOCK_SIZE_BITS); - writeBlockSizeBits = - conf.getInt("fs." + uri.getScheme() + ".writeblockbits", - DEFAULT_WRITE_BLOCK_SIZE_BITS); - splitSizeBits = conf.getInt("fs." + uri.getScheme() + ".splitsizebits", DEFAULT_NFS_SPLIT_SIZE_BITS); - - // Get configuration values for UID, GID, user name and group name - NFS_UID = conf.getInt("fs." + uri.getScheme() + ".uid", NFS_UID_DEFAULT); - NFS_GID = conf.getInt("fs." + uri.getScheme() + ".gid", NFS_GID_DEFAULT); - NFS_USER_NAME = conf.get("fs." + uri.getScheme() + ".username", NFS_USER_NAME_DEFAULT); - NFS_GROUP_NAME = conf.get("fs." + uri.getScheme() + ".groupname", NFS_GROUP_NAME_DEFAULT); - LOG.info("Using credentials uid=" + NFS_UID + ", gid=" + NFS_GID + ", username=" + NFS_USER_NAME + ", groupname=" + NFS_GROUP_NAME); - - // Number of connections to NFS - numConnections = Math.min(MAX_NUM_CONNECTIONS, Math.max(MIN_NUM_CONNECTIONS, conf.getInt("fs." 
+ uri.getScheme() + ".numconnections", DEFAULT_NUM_CONNECTIONS))); - - // Initialize the filesystem store - store = new NFSv3FileSystemStore(uri, conf, numConnections); - store.initialize(uri, conf); - // Get FileSystem Information. We need dtpref (the preferred size in bytes) - // for READDIR request - fsInfo = store.getFilesystemInfo(store.getRootfh(), store.getCredentials()); - if (fsInfo == null) { - throw new IOException("NFS_FSINFO error: result is null"); + @Override + public void initialize(URI uri, Configuration conf) throws IOException { + + super.initialize(uri, conf); + + // Save configuration + this.configuration = conf; + this.uri = uri; + + // Here, we get either a config option pointing + // to additional info or a simple URI (in which case we just use defaults) + if (configuration.get("fs." + NFS_URI_SCHEME + ".configuration") != null) { + space = Mapping.loadFromFile(configuration.get("fs." + NFS_URI_SCHEME + ".configuration")).getNamespace(uri); + } // Create a namespace with defaults + else { + LOG.info("The URI " + uri + " has no additional config defined, resorting to defaults"); + space = new Mapping().buildNamespace(uri); + } + + // Get configuration from namespace + NFS_UID = space.getConfiguration().getNfsUid(); + NFS_GID = space.getConfiguration().getNfsGid(); + NFS_USER_NAME = space.getConfiguration().getNfsUsername(); + NFS_GROUP_NAME = space.getConfiguration().getNfsGroupname(); + + // Initialize router + // TODO: Make the router class configurable + router = new SimpleTopologyRouter(); + router.initialize(this, space, configuration); + + setConf(conf); + this.handleCache = new HandleCache(FILE_HANDLE_CACHE_SIZE); + workingDir = getHomeDirectory(); } - // Check the values from fsinfo for read/write block size - int Rtmax = fsInfo.getRtmax(); - if (Rtmax > 0 && ((1 << readBlockSizeBits) > Rtmax)) { - LOG.warn("Overwrite readBlockSize with Rtmax gottern from NFS server: " - + (1 << readBlockSizeBits) + "=>" + Rtmax); - readBlockSizeBits = 31 - Integer.numberOfLeadingZeros(Rtmax); + public long getSplitSize() { + return (1L << space.getConfiguration().getNfsSplitSizeBits()); } - - int Wtmax = fsInfo.getWtmax(); - if (Wtmax > 0 && ((1 << writeBlockSizeBits) > Wtmax)) { - LOG.warn("Overwrite writeBlockSize with Wtmax gottern from NFS server: " - + (1 << writeBlockSizeBits) + "=>" + Wtmax); - writeBlockSizeBits = 31 - Integer.numberOfLeadingZeros(Wtmax); + + @Override + public String getScheme() { + return NFS_URI_SCHEME; } - } - @Override - public Path getWorkingDirectory() { - return workingDir; - } + @Override + public URI getUri() { + return uri; + } - public BlockLocation[] getFileBlockLocations(Path p, long start, long len) - throws IOException { - if (p == null) { - LOG.debug("END getFileBlockLocations(): file is null"); - throw new NullPointerException(); + @Override + public void close() throws IOException { + List stores = router.getAllStores(); + if(stores != null && stores.size() > 0) { + for(NFSv3FileSystemStore store : stores) { + store.shutdown(); + } + } } - - FileStatus file = getFileStatus(p); - BlockLocation[] locations = getFileBlockLocations(file, start, len); - return locations; - } - - @Override - public BlockLocation[] getFileBlockLocations(FileStatus file, long start, - long len) throws IOException { - if (file == null) { - return null; - } - if (start < 0 || len < 0) { - throw new IllegalArgumentException("Invalid start or len parameter"); - } - if (file.getLen() <= start) { - LOG.info("END getFileBlockLocations(): length is <= start so no 
locations"); - return new BlockLocation[0]; - } - - String[] name = { hostname + ":" + port }; - String[] host = { hostname }; - BlockLocation locations[] = - new BlockLocation[] { new BlockLocation(name, host, 0, file.getLen()) }; - return locations; - } - - @Override - public void setWorkingDirectory(Path new_dir) { - workingDir = makeAbsolute(new_dir); - } - - private Path makeAbsolute(Path path) { - if (path.isAbsolute() && !path.isAbsoluteAndSchemeAuthorityNull()) { - return path; - } - Path newPath = new Path(workingDir, path); - return newPath; - } - - @Override - public Path getHomeDirectory() { - Path homeDir = makeQualified(new Path("/")); - return homeDir; - } - - public NFSv3FileSystem(NFSv3FileSystemStore store) { - this.store = store; - } - - public NFSv3FileSystem(URI uri, Configuration conf) throws IOException { - initialize(uri, conf); - } - - protected int getWriteBlockSizeBits() { - return writeBlockSizeBits; - } - - public Path makeQualified(Path path) { - checkPath(path); - Path p = path.makeQualified(this.getUri(), this.getWorkingDirectory()); - return p; - } - - private int getNFSPortFromPortMap() throws IOException { - if (uri == null) { - throw new IOException("URI is not defined yet!"); - } - - PortmapClient portmapClient = - new PortmapClient(uri.getHost(), Nfs3Constant.SUN_RPCBIND); - int nfsPort = - portmapClient.getport(Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, - PortmapMapping.TRANSPORT_TCP); - portmapClient.shutdown(); - - return nfsPort; - } - public int getBlockSizeBits() { - return readBlockSizeBits; - } + @Override + public Path getWorkingDirectory() { + return workingDir; + } - @Override - public FSDataInputStream open(Path f, int bufferSize) throws IOException { - f = makeAbsolute(f); - FileHandle handle = getAndVerifyFileHandle(f); - if (handle == null) { - LOG.error("open: handle is undefined for file" + f.toUri().getPath()); - return null; + public BlockLocation[] getFileBlockLocations(Path p, long start, long len) throws IOException { + FileStatus file = getFileStatus(p); + BlockLocation[] locations = getFileBlockLocations(file, start, len); + return locations; } - - return new FSDataInputStream(new BufferedFSInputStream(new NFSBufferedInputStream( - configuration, store, handle, f, this.getConf(), this.getBlockSizeBits(), this - .getUri().getScheme(), this.getSplitSize(), statistics), bufferSize)); - } + @Override + public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) + throws IOException { + if (file == null) { + return null; + } + if (start < 0 || len < 0) { + throw new IllegalArgumentException("Invalid start or len parameter"); + } + if (file.getLen() <= start) { + LOG.info("END getFileBlockLocations(): length is <= start so no locations"); + return new BlockLocation[0]; + } - @Override - public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) - throws IOException { - f = makeAbsolute(f); - FileHandle handle = getAndVerifyFileHandle(f); - if (handle == null) { - throw new IOException("File does not exist: " + f); + //TODO: See if we can provide the actual endpoints for topology awareness scheduling + String hostname = space.getUri().getHost(); + int port = space.getUri().getPort(); + + String[] name = {hostname + ":" + port}; + String[] host = {hostname}; + BlockLocation locations[] + = new BlockLocation[]{new BlockLocation(name, host, 0, file.getLen())}; + return locations; } - - return new FSDataOutputStream(new NFSBufferedOutputStream(configuration, handle, f, store, - 
this.getWriteBlockSizeBits(), true), statistics); - } - - @Override - public FSDataOutputStream create(Path f, FsPermission permission, - boolean overwrite, int bufferSize, short replication, long blockSize, - Progressable progress) throws IOException { - f = makeAbsolute(f); - FileHandle handle = getAndVerifyFileHandle(f); - - if (handle == null) { - Path parent = f.getParent(); - FileHandle parentHandle=null; - if (parent.isRoot()) { - parentHandle = store.getRootfh(); - } else { - if (mkdirs(parent)) { - parentHandle = getAndVerifyFileHandle(parent); - } else { - throw new IOException("Failed to create parent dir: " + parent); - } - } - - // At this point, we should have a valid parent handle. - if (parentHandle == null){ - throw new IOException("parenthandle is null for creating: " + f); - } - - handle = create(parentHandle, f.getName(), permission); - } - else { - if (overwrite != true) { - throw new IOException("File already exists: " + f); - } - truncate(handle, 0); - } - return new FSDataOutputStream(new NFSBufferedOutputStream(configuration, handle, f, store, - this.getWriteBlockSizeBits(), false), statistics); - } - - public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, - EnumSet flags, int bufferSize, short replication, long blockSize, - Progressable progress) throws IOException { - - if(flags.contains(CreateFlag.APPEND)) { - return append(f, bufferSize, progress); + + @Override + public void setWorkingDirectory(Path new_dir) { + workingDir = makeAbsolute(new_dir); } - else if(flags.contains(CreateFlag.OVERWRITE)) { - return this.create(f, permission, true, bufferSize, replication, blockSize, progress); + + private Path makeAbsolute(Path path) { + if (path == null) { + return null; + } + if (path.isAbsolute() && !path.isAbsoluteAndSchemeAuthorityNull()) { + return path; + } + Path newPath = new Path(workingDir, path); + return newPath; } - else { - return this.create(f, permission, false, bufferSize, replication, blockSize, progress); + + @Override + public Path getHomeDirectory() { + Path homeDir = makeQualified(new Path("/user/" + System.getProperty("user.name"))); + return homeDir; } - - } - @Override - public boolean rename(Path src, Path dst) throws IOException { - src = makeAbsolute(src); - dst = makeAbsolute(dst); - - FileStatus srcStatus; - FileStatus dstStatus; - - LOG.info("Rename from " + src.toUri() + " to " + dst.toUri()); - - // Check status of src and dst paths - try { - srcStatus = getFileStatus(src); - } catch(FileNotFoundException exception) { - srcStatus = null; + @Override + public Path makeQualified(Path path) { + checkPath(path); + Path p = path.makeQualified(this.getUri(), this.getWorkingDirectory()); + return p; } - - try { - dstStatus = getFileStatus(dst); - } catch(FileNotFoundException exception) { - dstStatus = null; + + @Override + public FSDataInputStream open(Path f, int bufferSize) throws IOException { + f = makeAbsolute(f); + NFSv3FileSystemStore store = router.getStore(f); + + // Directories cannot be opened for reading + FileStatus status = getFileStatus(f); + if (status != null && status.isDirectory()) { + throw new FileNotFoundException("open(): cannot open a directory " + f + " for reading"); + } + + FileHandle handle = getAndVerifyFileHandle(f); + if (handle == null) { + LOG.error("open(): file handle is undefined for file" + f); + return null; + } + + return new FSDataInputStream(new BufferedFSInputStream(new NFSBufferedInputStream(store, handle, + f, this.getConf(), this.getSplitSize(), getCredentials(), 
statistics), bufferSize)); + } - - // Source path must exist - if(srcStatus == null) { - LOG.warn("Source path does not exist"); - return false; + + @Override + public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) + throws IOException { + f = makeAbsolute(f); + NFSv3FileSystemStore store = router.getStore(f); + + FileHandle handle = getAndVerifyFileHandle(f); + if (handle == null) { + throw new FileNotFoundException("append(): file " + f + " does not exist"); + } + + return new FSDataOutputStream(new NFSBufferedOutputStream(configuration, handle, f, store, getCredentials(), true), statistics); + } + + @Override + public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, + int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { + f = makeAbsolute(f); + NFSv3FileSystemStore store = router.getStore(f); + + FileHandle handle = getAndVerifyFileHandle(f); + if (handle == null) { + Path parent = f.getParent(); + + NFSv3FileSystemStore parentStore = router.getStore(parent); + if(!parentStore.equals(store)) { + throw new IOException("Cannot create across two junctions"); + } + + FileHandle parentHandle = null; + if (parent.isRoot() || isRoot(parent)) { + parentHandle = store.getRootFileHandle(); + } else { + if (mkdirs(parent)) { + parentHandle = getAndVerifyFileHandle(parent); + } else { + throw new IOException("create(): failed to create parent dir " + parent); + } + } + + // At this point, we should have a valid parent handle. + if (parentHandle == null) { + throw new IOException("create(): parent handle is null for creating " + f); + } + + handle = create(store, parentHandle, f.getName(), permission); + } else { + FileStatus status = getFileStatus(f); + if (status != null) { + if (status.isDirectory()) { + throw new FileAlreadyExistsException("create(): path " + f + " is already a directory"); + } else { + if (overwrite != true) { + throw new FileAlreadyExistsException("create(): file already exists " + f); + } + truncate(store, handle, 0); + } + } else { + throw new IOException("create(): could not get status of file " + f); + } + } + return new FSDataOutputStream(new NFSBufferedOutputStream(configuration, handle, f, store, + getCredentials(), false), statistics); } - - FileHandle srcParentHandle = getAndVerifyFileHandle(src.getParent()); - FileHandle dstParentHandle = getAndVerifyFileHandle(dst.getParent()); - - if (srcParentHandle == null) { - LOG.warn("Source parent does not exist"); - return false; + + public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, + EnumSet flags, int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + if (flags.contains(CreateFlag.APPEND)) { + return append(f, bufferSize, progress); + } else if (flags.contains(CreateFlag.OVERWRITE)) { + return this.create(f, permission, true, bufferSize, replication, blockSize, progress); + } else { + return this.create(f, permission, false, bufferSize, replication, blockSize, progress); + } } - - if (dstParentHandle == null) { - LOG.warn("Destination parent does not exist"); - return false; + + @Override + public boolean rename(Path src, Path dst) throws IOException { + src = makeAbsolute(src); + dst = makeAbsolute(dst); + + NFSv3FileSystemStore srcStore = router.getStore(src); + NFSv3FileSystemStore dstStore = router.getStore(dst); + + FileStatus srcStatus; + FileStatus dstStatus; + + // Cannot rename across filesystems + if (!srcStore.equals(dstStore)) { + throw new 
IOException("rename(): cannot rename src=" + src + " dst=" + dst + " because they use two different junctions"); + } + + // Check status of src and dst paths + try { + srcStatus = getFileStatus(src); + } catch (FileNotFoundException exception) { + srcStatus = null; + } + + try { + dstStatus = getFileStatus(dst); + } catch (FileNotFoundException exception) { + dstStatus = null; + } + + // Source path must exist + if (srcStatus == null) { + LOG.warn("rename(): source path " + src + " does not exist"); + return false; + } + + if (src.getParent() == null) { + LOG.warn("rename(): root directory " + src + " cannot be renamed"); + return false; + } + + if (dst.getParent() == null) { + LOG.warn("rename(): cannot rename directory to root " + dst); + return false; + } + + // All the paths must be in the same filesystem + NFSv3FileSystemStore srcParentStore = router.getStore(src.getParent()); + NFSv3FileSystemStore dstParentStore = router.getStore(dst.getParent()); + if(!srcStore.equals(srcParentStore) && !srcStore.equals(dstParentStore)) { + throw new IOException("Cannot rename across different junctions"); + } + + FileHandle srcParentHandle = getAndVerifyFileHandle(src.getParent()); + FileHandle dstParentHandle = getAndVerifyFileHandle(dst.getParent()); + + if (srcParentHandle == null) { + LOG.warn("rename(): parent of source " + src + " does not exist"); + return false; + } + + if (dstParentHandle == null) { + LOG.warn("rename(): parent of destination " + dst + " does not exist"); + return false; + } + + int status = Nfs3Status.NFS3ERR_IO; + + // Destination is a file + if (dstStatus != null && dstStatus.isFile()) { + if (srcStatus.isDirectory()) { + LOG.warn("rename(): trying to rename existing directory " + src + " into a file " + dst); + return false; + } else if (srcStatus.isFile()) { + return src.equals(dst); + } else { + throw new IOException("rename(): source " + src + " is neither a file nor a directory"); + } + } // Destination is a directory, so move source into destination + else if (dstStatus != null && dstStatus.isDirectory()) { + FileHandle dstHandle = getAndVerifyFileHandle(dst); + if (dstHandle == null) { + throw new IOException("rename(): destination " + dst + " is a directory but its handle cannot be found"); + } + RENAME3Response rename3Response + = srcStore.rename(srcParentHandle, src.getName(), dstHandle, src.getName(), + getCredentials()); + status = rename3Response.getStatus(); + } // Destination does not exist + else if (dstStatus == null) { + RENAME3Response rename3Response + = srcStore.rename(srcParentHandle, src.getName(), dstParentHandle, dst.getName(), + getCredentials()); + status = rename3Response.getStatus(); + } + + if (status != Nfs3Status.NFS3_OK) { + if (status == Nfs3Status.NFS3ERR_INVAL) { + return false; + } + throw new IOException("rename(): rename of src " + src + " to dst " + dst + " returned status " + status); + } + + // Remove old handles + handleCache.removeAll(Path.getPathWithoutSchemeAndAuthority(src).toString()); + + return true; } - - int status = Nfs3Status.NFS3ERR_IO; - - // Destination is a file - if(dstStatus != null && dstStatus.isFile()) { - if(srcStatus.isDirectory()) { - LOG.warn("Trying to rename file to an existing directory"); - return false; - } else if(srcStatus.isFile()) { - LOG.warn("Trying to rename over an existing file"); - return false; + + @Override + public boolean delete(Path f, boolean recursive) throws IOException { + f = makeAbsolute(f); + NFSv3FileSystemStore store = router.getStore(f); + + FileHandle handle = 
getAndVerifyFileHandle(f); + if (handle == null) { + LOG.warn("delete(): file " + f + " does not exist"); + return false; + } + + Nfs3FileAttributes attributes = store.getFileAttributes(handle, getCredentials()); + if (attributes == null) { + throw new IOException("delete(): could not get file attributes for " + f); + } + + int fileType = attributes.getType(); + if (fileType == NfsFileType.NFSREG.toValue()) { + return remove(f); + } else if (fileType == NfsFileType.NFSDIR.toValue()) { + Set subPaths = listSubPaths(f); + if (recursive == false && (subPaths != null && subPaths.isEmpty() == false)) { + throw new IOException("delete(): directory " + f + " is not empty so it cannot be deleted"); + } + + FileStatus[] files = listStatus(f); + for (FileStatus fileStatus : files) { + if (delete(fileStatus.getPath(), recursive) == false) { + LOG.warn("delete(): recursive delete failed for " + fileStatus.getPath()); + return false; + } + } + return rmdir(f); } else { - throw new IOException("Source is neither a file nor a directory"); - } - } - // Destination is a directory, so move source into destination - else if(dstStatus != null && dstStatus.isDirectory()) { - FileHandle dstHandle = getAndVerifyFileHandle(dst); - if(dstHandle ==null) { - throw new IOException("Destination is a directory but its handle cannot be found"); - } - RENAME3Response rename3Response = - store.rename(srcParentHandle, src.getName(), dstHandle, src.getName(), - store.getCredentials()); - status = rename3Response.getStatus(); - } - // Destination does not exist - else if(dstStatus == null) { - RENAME3Response rename3Response = - store.rename(srcParentHandle, src.getName(), dstParentHandle, dst.getName(), - store.getCredentials()); - status = rename3Response.getStatus(); + throw new IOException("delete(): file " + f + " is neither a file nor directory"); + } } - - if (status != Nfs3Status.NFS3_OK) { - throw new IOException("rename error status=" + status); + + @Override + public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException { + f = makeAbsolute(f); + + FileStatus fileStatus = getFileStatus(f); + if (!fileStatus.isDirectory()) { + return new FileStatus[]{fileStatus}; + } + + // Path f is a dir, return a list of all files/dir in this dir + ArrayList fileStatusList = new ArrayList<>(); + for (Path path : listSubPaths(f)) { + FileStatus s1 = getFileStatus(path); + fileStatusList.add(s1); + } + FileStatus[] array = fileStatusList.toArray(new FileStatus[0]); + return array; + } + + @Override + public boolean mkdirs(Path f, FsPermission permission) throws IOException { + f = makeAbsolute(f); + NFSv3FileSystemStore store; + + // Capture root paths of all endpoints + Set rootPaths = new HashSet<>(); + for(Endpoint ep : space.getEndpoints()) { + if(ep.getPath() !=null) { + rootPaths.add(ep.getPath()); + } + } + + // Will check up till the root of one of exported paths + List dirs = new LinkedList<>(); + Path path = Path.getPathWithoutSchemeAndAuthority(f); + do { + dirs.add(0, path.getName()); + path = path.getParent(); + } while (!path.isRoot() && !rootPaths.contains(path.toString())); + + store = router.getStore(path); + FileHandle parentDir = store.getRootFileHandle(); + + path = null; + for (String dir : dirs) { + if (path == null) { + path = new Path(Path.SEPARATOR + dir); + } else { + path = new Path(path.toString() + Path.SEPARATOR + dir); + } + + /* + NFSv3FileSystemStore s = router.getStore(path); + if(!s.equals(store)) { + System.err.println("mkdirs(): path " + f + " parent " + path); + 
System.err.println("mkdirs(): mismatched " + store + " and " + s); + throw new IOException("Trying to create directories across junctions"); + } + */ + + FileHandle dirHandle = store.getFileHandle(parentDir, dir, getCredentials()); + if(dirHandle != null) { + Nfs3FileAttributes attr = store.getFileAttributes(dirHandle, getCredentials()); + if(attr.getType() != NfsFileType.NFSDIR.toValue()) { + throw new FileAlreadyExistsException("Trying to make subdirectory inside a file"); + } + parentDir = dirHandle; + } + else { + mkdir(store, parentDir, dir, permission); + parentDir = store.getFileHandle(parentDir, dir, getCredentials()); + } + } + return true; + } + + private void checkNFSStatus(FileHandle handle, Path path, int status, String NFSCall) + throws IOException { + /* If not OK or stale handle, then we are in trouble. */ + if (status != Nfs3Status.NFS3_OK && status != Nfs3Status.NFS3ERR_STALE) { + String errorMsg = NFSCall + " error: " + status; + if (path != null) { + errorMsg += " for path " + path.toUri().getPath(); + } + throw new IOException(errorMsg); + } + + /* If handle is stale, remove it from handleCache. */ + if (status == Nfs3Status.NFS3ERR_STALE) { + handleCache.removeByValue(handle); + LOG.warn("NFS_GETATTR failed with status=" + status); + } } - // Remove old handles - handleCache.removeAll(Path.getPathWithoutSchemeAndAuthority(src).toString()); - - return true; - } + private Boolean remove(Path f) throws IOException { + NFSv3FileSystemStore store = router.getStore(f); + + // Parent and child must be on the same filesystem + NFSv3FileSystemStore parentStore = router.getStore(f.getParent()); + if(!parentStore.equals(store)) { + LOG.error("rmdir(): Parent " + f.getParent() + " and child " + f + " are on different junctions"); + throw new IOException("rmdir(): Parent " + f.getParent() + " and child " + f + " are on different junctions"); + } + + FileHandle dirHandle = getAndVerifyFileHandle(f.getParent()); + if (dirHandle == null) { + throw new IOException("remove(): parent of path " + f + " does not exist"); + } + + String pathString = f.toUri().getPath(); + String name = f.getName(); + int status; + REMOVE3Response remove3Response = store.remove(dirHandle, name, getCredentials()); + status = remove3Response.getStatus(); + if (status != Nfs3Status.NFS3_OK) { + throw new IOException("remove(): failed for " + f + " with error status " + status); + } - @Override - public boolean delete(Path f, boolean recursive) throws IOException { - f = makeAbsolute(f); - FileHandle handle = getAndVerifyFileHandle(f); - if (handle == null) { - LOG.info("file " + f.toUri().getPath() + " does not exist"); - return false; + // Remove the stale handle from the handle cache + if (handleCache.get(pathString) != null) { + handleCache.remove(pathString); + } + return true; } - Nfs3FileAttributes attributes = store.getFileAttributes(handle, store.getCredentials()); - if(attributes == null) { - throw new IOException("Could not get file attributes for path=" + f); + private Boolean rmdir(Path f) throws IOException { + int status; + NFSv3FileSystemStore store = router.getStore(f); + + if (f.isRoot() || isRoot(f)) { + LOG.warn("rmdir(): cannot delete root directory"); + return true; + } + + // Parent and child must be on the same filesystem + NFSv3FileSystemStore parentStore = router.getStore(f.getParent()); + if(!parentStore.equals(store)) { + LOG.error("rmdir(): Parent " + f.getParent() + " and child " + f + " are on different junctions"); + throw new IOException("rmdir(): Parent " + f.getParent() + " 
and child " + f + " are on different junctions"); + } + + String pathString = f.toUri().getPath(); + FileHandle parentDirHandle = getAndVerifyFileHandle(f.getParent()); + String name = f.getName(); + + if (parentDirHandle == null) { + throw new IOException("rmdir(): parent dir " + f.getParent() + " does not exist"); + } + + RMDIR3Response rmdir3Response = store.rmdir(parentDirHandle, name, getCredentials()); + status = rmdir3Response.getStatus(); + if (status != Nfs3Status.NFS3_OK) { + throw new IOException("rmdir(): failed for " + f + " with error status " + status); + } + + // Remove the stale handle from the handle cache + if (handleCache.get(pathString) != null) { + handleCache.remove(pathString); + } + + return true; } - - int fileType = attributes.getType(); - if (fileType == NfsFileType.NFSREG.toValue()) { - return remove(f); - } else if (fileType == NfsFileType.NFSDIR.toValue()) { - Set subPaths = listSubPaths(f); - if(recursive == false && (subPaths != null && subPaths.isEmpty() == false)) { - throw new IOException("Directory is not empty so it cannot be deleted"); - } - - /* - * Return false if recursive is not set. The upper layer seems to handle - * this case already. So, this should not happen. - */ - /* - if (recursive == false) { - return false; - } - */ - FileStatus[] files = listStatus(f); - for (FileStatus fileStatus : files) { - if (delete(fileStatus.getPath(), recursive) == false) { - LOG.error("delete failed for " + fileStatus); - return false; - } - } - return rmdir(f); - } else { - throw new IOException( - "Delete error: file type not supported beyond regular file and dir"); - } - } - - @Override - public FileStatus[] listStatus(Path f) throws FileNotFoundException, - IOException { - f = makeAbsolute(f); - FileStatus fileStatus = getFileStatus(f); - if (!fileStatus.isDirectory()) { - //fileStatus.setPath(Path.getPathWithoutSchemeAndAuthority(f)); - return new FileStatus[] { fileStatus }; - } - - // Path f is a dir, return a list of all files/dir in this dir - ArrayList fileStatusList = new ArrayList(); - for (Path path : listSubPaths(f)) { - FileStatus s1 = getFileStatus(path); - //s1.setPath(Path.getPathWithoutSchemeAndAuthority(path)); - fileStatusList.add(s1); - } - FileStatus[] array = fileStatusList.toArray(new FileStatus[0]); - return array; - } - - - @Override - public boolean mkdirs(Path f, FsPermission permission) throws IOException { - f = makeAbsolute(f); - - List dirs = new LinkedList(); - Path path = Path.getPathWithoutSchemeAndAuthority(f); - do { - dirs.add(0, path.getName()); - path = path.getParent(); - } while (path.isRoot() != true); - - path = null; - FileHandle parentDir = store.getRootfh(); - for (String dir : dirs) { - if (path == null) { - path = new Path(Path.SEPARATOR + dir); - } else { - path = new Path(path.toString() + Path.SEPARATOR + dir); - } - if (handleCache.get(path.toString()) != null) { - // Check for stale filehandle for parentDir - parentDir = handleCache.get(path.toString()); - continue; - } - - // create the subdir and then set parent dir to be subdir and continue. - // The subdir could exist already. - mkdir(parentDir, dir, permission); - - parentDir = store.getFileHandle(parentDir, dir, store.getCredentials()); - if(parentDir != null) { - handleCache.put(path.toString(), parentDir); - } else { - //FIXME Check for stale handle - } - - } - return true; - } - - private void checkNFSStatus(FileHandle handle, Path path, int status, - String NFSCall) throws IOException { - - /* If not OK or stale handle, then we are in trouble. 
*/ - if (status != Nfs3Status.NFS3_OK && status != Nfs3Status.NFS3ERR_STALE) { - String errorMsg = - NFSCall + " error: " + status; - if (path != null) { - errorMsg += " for path " + path.toUri().getPath(); - } - throw new IOException(errorMsg); - } - - /* If handle is stale, remove it from handleCache. */ - if (status == Nfs3Status.NFS3ERR_STALE) { - handleCache.removeByValue(handle); - LOG.warn("NFS_GETATTR failed with status=" + status); - } - } - - private Boolean remove(Path file) throws IOException { - String pathString = file.toUri().getPath(); - FileHandle dirHandle = getAndVerifyFileHandle(file.getParent()); - String name = file.getName(); - int status; - - if (dirHandle == null) { - throw new IOException("remove error: parent dir does not exist"); - } - - REMOVE3Response remove3Response = - store.remove(dirHandle, name, store.getCredentials()); - status = remove3Response.getStatus(); - if (status != Nfs3Status.NFS3_OK) { - throw new IOException("NFS_REMOVE failed for " + file + " with error status=" + status); - } - - // Remove the stale handle from the handle cache - if (handleCache.get(pathString) != null) { - handleCache.remove(pathString); - } - return true; - } - - private Boolean rmdir(Path dir) throws IOException { - int status; - String pathString = dir.toUri().getPath(); - FileHandle parentDirHandle = getAndVerifyFileHandle(dir.getParent()); - String name = dir.getName(); - - if (parentDirHandle == null) { - throw new IOException("rmdir error: parent dir does not exist"); - } - - RMDIR3Response rmdir3Response = - store.rmdir(parentDirHandle, name, store.getCredentials()); - status = rmdir3Response.getStatus(); - if (status != Nfs3Status.NFS3_OK) { - throw new IOException("NFS_RMDIR failed for " + dir + " with error status=" + status); - } - - // Remove the stale handle from the handle cache - if (handleCache.get(pathString) != null) { - LOG.debug("remove handle " + handleCache.get(pathString) + " for file " - + pathString); - handleCache.remove(pathString); - } - - return true; - } - - private Set listSubPaths(Path path) throws IOException { - path = makeAbsolute(path); - Path fsPath = Path.getPathWithoutSchemeAndAuthority(path); - FileStatus fileStatus = getFileStatus(path); - - // Return null if it is a file - if (!fileStatus.isDirectory()) { - return null; - } - - FileHandle handle = getAndVerifyFileHandle(path); - if (handle == null) { - LOG.info("Directory to list does not exist: " + path); - return null; - } - - // Read in all entries in this directory - Set paths = new TreeSet(); - long cookie = 0; - long cookieVerf = 0; - - // Keep fetching directory entries until the list stops - while(true) { - Nfs3DirList dirList = store.getDirectoryList(handle, cookie, cookieVerf, fsInfo.getDtpref(), store.getCredentials()); - if(dirList != null) { - List entryList = dirList.getEntries(); - for(Nfs3DirEntry entry : entryList) { - // Ignore dot and dot-dot entries - if(entry.getName().equals(UNIX_DOT_DIR) || entry.getName().equals(UNIX_DOT_DOT_DIR)) { - continue; - } - - Path newPath; - if(fsPath.isRoot()) { - newPath = new Path(Path.SEPARATOR + entry.getName()); - } else { - newPath = new Path(fsPath.toString() + Path.SEPARATOR + entry.getName()); - } - paths.add(newPath); - cookie = entry.getCookie(); - } - } - - // Check for more entries - if(dirList.isEof()) { - break; - } else { - cookieVerf = dirList.getCookieVerf(); - } - } - - return paths; - } - - private void mkdir(FileHandle dir, String name, FsPermission permission) - throws IOException { - int status; - - EnumSet 
updateFields = EnumSet.noneOf(SetAttrField.class); - /* - * Note we do not set a specific size for a directory. NFS server should be - * able to figure it out when creating it. We also not set the mtime and - * ctime. Use the timestamp at the server machine. - */ - updateFields.add(SetAttr3.SetAttrField.UID); - updateFields.add(SetAttr3.SetAttrField.GID); - updateFields.add(SetAttr3.SetAttrField.MODE); - Nfs3SetAttr objAttr = new Nfs3SetAttr(permission.toShort(), NFS_UID, NFS_GID, 0, - null, null, updateFields); - - MKDIR3Response mkdir3Response = - store.mkdir(dir, name, objAttr, store.getCredentials()); - status = mkdir3Response.getStatus(); - if (status != Nfs3Status.NFS3_OK) { - if (status == Nfs3Status.NFS3ERR_EXIST) { - LOG.debug("NFSFileSystem mkdir: already exists for handle=" + dir - + ", dir=" + name); - } else { - throw new IOException("NFSFileSystem mkdir error: status=" + status); - } - } - - } - - private FileHandle create(FileHandle dir, String name, FsPermission permission) - throws IOException { - FileHandle handle = null; - - EnumSet updateFields = EnumSet.noneOf(SetAttrField.class); - updateFields.add(SetAttr3.SetAttrField.UID); - updateFields.add(SetAttr3.SetAttrField.GID); - updateFields.add(SetAttr3.SetAttrField.MODE); - Nfs3SetAttr objAttr = new Nfs3SetAttr(permission.toShort(), NFS_UID, NFS_GID, 0, - null, null, updateFields); - - CREATE3Response create3Response = - store.create(dir, name, Nfs3Constant.CREATE_UNCHECKED, objAttr, 0, - store.getCredentials()); - int status = create3Response.getStatus(); - if (status != Nfs3Status.NFS3_OK) { - throw new IOException("NFSFileSystem create error: status=" + status); + + private Set listSubPaths(Path f) throws IOException { + f = makeAbsolute(f); + NFSv3FileSystemStore store = router.getStore(f); + Path fsPath = Path.getPathWithoutSchemeAndAuthority(f); + FileStatus fileStatus = getFileStatus(f); + + // Return null if it is a file + if (!fileStatus.isDirectory()) { + return null; + } + + FileHandle handle = getAndVerifyFileHandle(f); + if (handle == null) { + LOG.info("Directory to list does not exist: " + f); + return null; + } + + // Read in all entries in this directory + Set paths = new TreeSet<>(); + long cookie = 0; + long cookieVerf = 0; + + // Keep fetching directory entries until the list stops + while (true) { + Nfs3DirList dirList + = store.getDirectoryList(handle, cookie, cookieVerf, store.getDirListSize(), + getCredentials()); + if (dirList != null) { + List entryList = dirList.getEntries(); + for (Nfs3DirEntry entry : entryList) { + cookie = entry.getCookie(); + // Ignore dot and dot-dot entries + if (entry.getName().equals(UNIX_DOT_DIR) || entry.getName().equals(UNIX_DOT_DOT_DIR) || entry.getName().equals(".vsadmin")) { + continue; + } + + Path newPath; + if (fsPath.isRoot()) { + newPath = new Path(Path.SEPARATOR + entry.getName()); + } else { + newPath = new Path(fsPath.toString() + Path.SEPARATOR + entry.getName()); + } + paths.add(newPath); + + } + } + + // Check for more entries + if (dirList == null || dirList.isEof()) { + break; + } else { + cookieVerf = dirList.getCookieVerf(); + } + } + + return paths; + } + + private boolean mkdir(NFSv3FileSystemStore store, FileHandle dir, String name, FsPermission permission) throws IOException { + int status; + EnumSet updateFields = EnumSet.noneOf(SetAttrField.class); + /* + * Note we do not set a specific size for a directory. NFS server should be able to figure it + * out when creating it. We also do not set the mtime and ctime. 
Use the timestamp at the server + * machine. + */ + updateFields.add(SetAttr3.SetAttrField.UID); + updateFields.add(SetAttr3.SetAttrField.GID); + updateFields.add(SetAttr3.SetAttrField.MODE); + Nfs3SetAttr objAttr + = new Nfs3SetAttr(permission.toShort(), NFS_UID, NFS_GID, 0, null, null, updateFields); + + MKDIR3Response mkdir3Response = store.mkdir(dir, name, objAttr, getCredentials()); + status = mkdir3Response.getStatus(); + if (status != Nfs3Status.NFS3_OK) { + if (status == Nfs3Status.NFS3ERR_EXIST) { + LOG.error("mkdir(): Could not create directory with name " + name); + throw new FileAlreadyExistsException(); + } else if (status == Nfs3Status.NFS3ERR_NOTDIR) { + throw new ParentNotDirectoryException(); + } else { + throw new IOException("mkdir(): returned error status " + status); + } + } + return true; + } + + private FileHandle create(NFSv3FileSystemStore store, FileHandle dir, String name, FsPermission permission) + throws IOException { + EnumSet updateFields = EnumSet.noneOf(SetAttrField.class); + updateFields.add(SetAttr3.SetAttrField.UID); + updateFields.add(SetAttr3.SetAttrField.GID); + updateFields.add(SetAttr3.SetAttrField.MODE); + Nfs3SetAttr objAttr + = new Nfs3SetAttr(permission.toShort(), NFS_UID, NFS_GID, 0, null, null, updateFields); + + CREATE3Response create3Response + = store.create(dir, name, Nfs3Constant.CREATE_UNCHECKED, objAttr, 0, getCredentials()); + int status = create3Response.getStatus(); + if (status != Nfs3Status.NFS3_OK) { + throw new IOException("create(): returned error status " + status); + } + + FileHandle handle = create3Response.getObjHandle(); + return handle; } - - handle = create3Response.getObjHandle(); - return handle; - } - - private void truncate(FileHandle handle, long newSize) throws IOException { - int status; - EnumSet updateFields = EnumSet.noneOf(SetAttrField.class); - updateFields.add(SetAttr3.SetAttrField.SIZE); - Nfs3SetAttr objAttr = new Nfs3SetAttr(); - objAttr.setUpdateFields(updateFields); - objAttr.setSize(newSize); - - SETATTR3Response setAttr3Response = - store.setattr(handle, objAttr, false, null, store.getCredentials()); - status = setAttr3Response.getStatus(); - checkNFSStatus(handle, null, status, "NFS_SETATTR"); - } - - @SuppressWarnings("unused") - @Deprecated - private FileHandle getFileHandle(Path path) throws IOException { - path = makeAbsolute(path); - Path FsPath = Path.getPathWithoutSchemeAndAuthority(path); - FileHandle handle = null; - - if (FsPath.isRoot()) { - return store.getRootfh(); - } - - handle = handleCache.get(FsPath.toString()); - if (handle != null) { - return handle; - } else { - FileHandle parentHandle = getFileHandle(path.getParent()); - if (parentHandle == null) { - return null; - } - - handle = store.getFileHandle(parentHandle, FsPath.getName(), store.getCredentials()); - // FIXME Check that handle is not stale and checkNFSStatus - handleCache.put(FsPath.toString(), handle); - } - - return handle; - } - - private FileHandle getAndVerifyFileHandle(Path path) throws IOException { - path = makeAbsolute(path); - int status; - Path FsPath = Path.getPathWithoutSchemeAndAuthority(path); - FileHandle handle = null; - - if (FsPath.isRoot()) { - handle = store.getRootfh(); - GETATTR3Response getAttr3Response = store.getattr(handle, store.getCredentials()); - status = getAttr3Response.getStatus(); - if (status != Nfs3Status.NFS3_OK) { - throw new IOException("NFS_GETATTR failed for root handle with error: status=" + status); - } - return handle; - } - - // If the handle is in the cache and valid, return 
it - handle = handleCache.get(FsPath.toString()); - if (handle != null) { - GETATTR3Response getAttr3Response = store.getattr(handle, store.getCredentials()); - status = getAttr3Response.getStatus(); - checkNFSStatus(handle, FsPath, status, "NFS_GETATTR"); - if (status == Nfs3Status.NFS3_OK) { + + private void truncate(NFSv3FileSystemStore store, FileHandle handle, long newSize) throws IOException { + int status; + EnumSet updateFields = EnumSet.noneOf(SetAttrField.class); + updateFields.add(SetAttr3.SetAttrField.SIZE); + Nfs3SetAttr objAttr = new Nfs3SetAttr(); + objAttr.setUpdateFields(updateFields); + objAttr.setSize(newSize); + + SETATTR3Response setAttr3Response + = store.setattr(handle, objAttr, false, null, getCredentials()); + status = setAttr3Response.getStatus(); + checkNFSStatus(handle, null, status, "NFS_SETATTR"); + } + + private FileHandle getAndVerifyFileHandle(Path path) throws IOException { + int status; + if (path == null) { + return null; + } + + path = makeAbsolute(path); + NFSv3FileSystemStore store = router.getStore(path); + Path fsPath = Path.getPathWithoutSchemeAndAuthority(path); + FileHandle handle; + + // Root paths (top root or junctioned-root) + if (fsPath.isRoot() || isRoot(path)) { + handle = store.getRootFileHandle(); + GETATTR3Response getAttr3Response = store.getattr(handle, getCredentials()); + status = getAttr3Response.getStatus(); + if (status != Nfs3Status.NFS3_OK) { + throw new IOException("getAndVerifyHandle(): Could not get attributes for path " + path); + } + return handle; + } + + // Make sure parent and child are in the same junctioned filesystem + NFSv3FileSystemStore parentStore = router.getStore(path.getParent()); + if(!parentStore.equals(store)) { + throw new IOException("getAndVerifyHandle(): Parent " + path.getParent() + " and child " + path + " are not on the same filesystem!"); + } + + // If the handle is in the cache and valid, return it + handle = handleCache.get(fsPath.toString()); + if (handle != null) { + GETATTR3Response getAttr3Response = store.getattr(handle, getCredentials()); + status = getAttr3Response.getStatus(); + if (status == Nfs3Status.NFS3_OK) { + return handle; + } else { + // we have a stale handle in the handle cache, remove it + assert (status == Nfs3Status.NFS3ERR_STALE); + handleCache.remove(fsPath.toString()); + } + } + + // else, get the valid parent handle and then lookup for the handle + FileHandle parentHandle = getAndVerifyFileHandle(path.getParent()); + if (parentHandle == null) { + LOG.info("getAndVerifyHandle(): Parent path " + path.getParent() + " could not be found"); + return null; + } + + handle = store.getFileHandle(parentHandle, fsPath.getName(), getCredentials()); + if(handle != null) { + handleCache.put(fsPath.toString(), handle); + } return handle; - } else { - // we have a stale handle in the handle cache, remove it - assert(status == Nfs3Status.NFS3ERR_STALE); - handleCache.remove(FsPath.toString()); - } - } - - // else, get the valid parent handle and then lookup for the handle - FileHandle parentHandle = getAndVerifyFileHandle(path.getParent()); - if (parentHandle == null) { - return null; - } - handle = store.getFileHandle(parentHandle, FsPath.getName(), store.getCredentials()); - //FIXME Check that handle is not stale - handleCache.put(FsPath.toString(), handle); - return handle; - } - - @Override - public FileStatus getFileStatus(Path f) throws IOException { - f = makeAbsolute(f); - FileStatus fileStatus = null; - - FileHandle handle = getAndVerifyFileHandle(f); - if (handle == null) { - 
throw new FileNotFoundException("File does not exist: " + f); } - Nfs3FileAttributes fileAttr = store.getFileAttributes(handle, store.getCredentials()); - if(fileAttr == null) { - throw new IOException("Could not get file attributes for path=" + f); + @Override + public FileStatus getFileStatus(Path f) throws IOException { + f = makeAbsolute(f); + NFSv3FileSystemStore store = router.getStore(f); + + FileHandle handle = getAndVerifyFileHandle(f); + if (handle == null) { + throw new FileNotFoundException("getFileStatus(): file " + f + " does not exist"); + } + + Nfs3FileAttributes fileAttr = store.getFileAttributes(handle, getCredentials()); + if (fileAttr == null) { + throw new IOException("getFileStatus(): could not get attributes of file " + f); + } + + boolean isDir = false; + if (fileAttr.getType() == NfsFileType.NFSDIR.toValue()) { + isDir = true; + } + + FileStatus fileStatus + = new FileStatus(fileAttr.getSize(), isDir, 1, getSplitSize(), fileAttr.getMtime() + .getMilliSeconds(), fileAttr.getAtime().getMilliSeconds(), new FsPermission( + (short) fileAttr.getMode()), NFS_USER_NAME, NFS_GROUP_NAME, f.makeQualified(uri, + workingDir)); + return fileStatus; + } + + protected boolean isRoot(Path path) throws IOException { + if(space == null) { + throw new IOException("isRoot(): Namespace is null"); + } + if(path == null) { + throw new IOException("isRoot(): Path is null"); + } + + NFSv3FileSystemStore store = router.getStore(path); + Path fsPath = Path.getPathWithoutSchemeAndAuthority(path); + return store.getEndpoint().getPath().equals(fsPath.toString()); } - Boolean isDir = false; - if (fileAttr.getType() == NfsFileType.NFSDIR.toValue()) { - isDir = true; - } - - fileStatus = - new FileStatus(fileAttr.getSize(), isDir, 1, getSplitSize(), - fileAttr.getMtime().getMilliSeconds(), fileAttr.getAtime() - .getMilliSeconds(), - new FsPermission((short) fileAttr.getMode()), - NFS_USER_NAME, NFS_GROUP_NAME, f.makeQualified(uri, - workingDir)); - return fileStatus; - } - -} // End class \ No newline at end of file + protected Credentials getCredentials() throws IOException { + if (space == null) { + throw new IOException("No namespace defined!"); + } + + NamespaceOptions options = space.getConfiguration(); + String authScheme = (options.getNfsAuthScheme() == null) ? NamespaceOptions.getDefaultOptions().getNfsAuthScheme() : options.getNfsAuthScheme(); + if (authScheme.equalsIgnoreCase("AUTH_SYS") || authScheme.equalsIgnoreCase("AUTH_UNIX")) { + CredentialsSys sys = new CredentialsSys(); + sys.setUID(options.getNfsUid()); + sys.setGID(options.getNfsGid()); + sys.setStamp((int) System.currentTimeMillis()); + return sys; + } else { + return new CredentialsNone(); + } + } + +} // End class
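[Editor's note] A minimal sketch of what the credential selection in getCredentials() above amounts to, assuming a populated NamespaceOptions and a non-null auth scheme; the local variables (options, scheme, creds) are illustrative only and not part of the patch:

    // AUTH_SYS/AUTH_UNIX yield a CredentialsSys carrying uid, gid and a stamp;
    // any other scheme falls back to AUTH_NONE.
    NamespaceOptions options = space.getConfiguration();
    String scheme = options.getNfsAuthScheme();
    Credentials creds;
    if ("AUTH_SYS".equalsIgnoreCase(scheme) || "AUTH_UNIX".equalsIgnoreCase(scheme)) {
        CredentialsSys sys = new CredentialsSys();
        sys.setUID(options.getNfsUid());
        sys.setGID(options.getNfsGid());
        sys.setStamp((int) System.currentTimeMillis());
        creds = sys;
    } else {
        creds = new CredentialsNone();
    }

diff --git a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystemStore.java b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystemStore.java index 093f3cb..f10469a 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystemStore.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3FileSystemStore.java @@ -1,32 +1,36 @@ /** * Copyright 2014 NetApp Inc. All Rights Reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License.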
You may obtain a copy of + * the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ - package org.apache.hadoop.fs.nfs; import java.io.IOException; import java.net.URI; +import java.util.Objects; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.nfs.mount.MountClient; import org.apache.hadoop.fs.nfs.mount.MountMNTResponse; import org.apache.hadoop.fs.nfs.portmap.PortmapClient; import org.apache.hadoop.fs.nfs.rpc.RpcClient; import org.apache.hadoop.fs.nfs.rpc.RpcException; +import org.apache.hadoop.fs.nfs.topology.Endpoint; +import org.apache.hadoop.fs.nfs.topology.Namespace; +import org.apache.hadoop.fs.nfs.topology.NamespaceOptions; +import org.apache.hadoop.mount.MountResponse; import org.apache.hadoop.nfs.NfsTime; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; @@ -61,524 +65,554 @@ import org.apache.hadoop.portmap.PortmapMapping; @InterfaceAudience.Private -@InterfaceStability.Unstable -public class NFSv3FileSystemStore extends RpcClient { - private final Credentials credentials; - - public Credentials getCredentials() { - return credentials; - } - - public NFSv3FileSystemStore(URI uri, Configuration conf, int numConnections) throws IOException { - - super(uri.getHost(), uri.getPort()); - - /* Prepare the credentials. 
*/ - if (conf != null) { - String credentialsFlavor = conf.get("fs.nfs.auth.flavor", null); - - if (credentialsFlavor != null) { - // Use no authentication (AUTH_NONE) - if (credentialsFlavor.equalsIgnoreCase("AUTH_NONE")) { - credentials = new CredentialsNone(); - } - // Use UNIX authentication (AUTH_SYS) - else if (credentialsFlavor.equalsIgnoreCase("AUTH_SYS") || credentialsFlavor.equalsIgnoreCase("AUTH_UNIX")) { - CredentialsSys sys = new CredentialsSys(); - sys.setUID(0); - sys.setGID(0); - sys.setStamp(new Long(System.currentTimeMillis()).intValue()); - credentials = sys; - } - else { - LOG.error("Authentication flavor is not supported!"); - throw new IOException("Authentication flavor is not supported!"); - } - } else { - credentials = new CredentialsNone(); - } - } else { - credentials = new CredentialsNone(); +public final class NFSv3FileSystemStore extends RpcClient { + + NFSv3FileSystem fs; + Namespace space; + Endpoint ep; + FileHandle rootHandle; + Nfs3Info fsInfo; + Long fsId; + + public static final Log LOG = LogFactory.getLog(NFSv3FileSystemStore.class); + + public NFSv3FileSystemStore(NFSv3FileSystem fs, Namespace space) throws IOException { + this(fs, space, space.getDefaultEndpoint()); } - } - FileHandle rootfh; + public NFSv3FileSystemStore(NFSv3FileSystem fs, Namespace space, Endpoint ep) throws IOException { - public FileHandle getRootfh() { - return rootfh; - } + super(ep.getUri().getHost(), ep.getUri().getPort()); - public static final Log LOG = LogFactory.getLog(NFSv3FileSystemStore.class); + NamespaceOptions options = space.getConfiguration(); + URI uri = ep.getUri(); + String mountDirectory = null; + this.fs = fs; + this.space = space; + this.ep = ep; - public void initialize(URI uri, Configuration conf) throws IOException { - LOG.debug("initialization: " + uri); - - if(conf == null) { - throw new IOException("No configuration given!"); - } - - // For testing, we can override the Portmap and Mount ports - // so they won't be checked for - if(conf.getBoolean("mambo.test", false) == false) { - PortmapClient portmapClient = - new PortmapClient(uri.getHost(), Nfs3Constant.SUN_RPCBIND); - int mountdPort = - portmapClient.getport(MountClient.MOUNTD_PROGRAM, - MountClient.MOUNTD_VERSION, PortmapMapping.TRANSPORT_TCP); - MountClient mountClient = - new MountClient(uri.getHost(), mountdPort, conf); - - String mountDir = conf.get("fs." 
+ uri.getScheme() + ".mountdir", null); - if (mountDir == null) { - throw new IOException("mount dir is not specified in configuration file"); - } - LOG.debug("MountDir: " + mountDir); - MountMNTResponse mountMNTResponse = mountClient.mnt(mountDir); - if (mountMNTResponse == null) { - LOG.error("mountMNTResponse is null"); - return; - } - - if (mountMNTResponse.getStatus() != Nfs3Status.NFS3_OK) { - LOG.error("failed to mount root path " + mountDir + " from host " - + uri.getHost() + ", status=" + mountMNTResponse.getStatus()); - return; - } - LOG.debug(mountMNTResponse); - rootfh = mountMNTResponse.getFilehandle(); - mountClient.shutdown(); - portmapClient.shutdown(); + // Find MOUNT port + int mountPort = options.getNfsMountPort(); + if (mountPort == NamespaceOptions.INVALID_PORT) { + PortmapClient portmap = new PortmapClient(uri.getHost(), options.getNfsRpcbindPort()); + mountPort = portmap.getport(MountClient.MOUNTD_PROGRAM, MountClient.MOUNTD_VERSION, PortmapMapping.TRANSPORT_TCP); + portmap.shutdown(); + } + + MountClient mount = new MountClient(space, uri.getHost(), mountPort); + if (ep.getExportPath() != null && ep.getExportPath().length() > 0) { + mountDirectory = ep.getExportPath(); + } else if (options.getNfsExportPath() != null && options.getNfsExportPath().length() > 0) { + mountDirectory = options.getNfsExportPath(); + } else { + mountDirectory = NamespaceOptions.getDefaultOptions().getNfsExportPath(); + } + + MountMNTResponse mntResponse = mount.mnt(mountDirectory); + if (mntResponse == null || mntResponse.getStatus() != MountResponse.MNT_OK) { + LOG.error("Could not get root file handle for endpoint ep=" + ep); + throw new IOException("Could not get root file handle"); + } + rootHandle = mntResponse.getFilehandle(); + + Nfs3FileAttributes attr = this.getFileAttributes(getRootFileHandle(), fs.getCredentials()); + if(attr == null) { + throw new IOException("Could not get filesystem id"); + } + fsId = attr.getFsid(); + LOG.debug("Store with ep " + ep + " has fsId " + fsId); + + mount.shutdown(); } - - // During testing, it bypasses Portmap and Mount and calls directly into NFS - else { - int mountPort = conf.getInt("fs.nfs.mount.port", uri.getPort()); - String mountPath = conf.get("fs.nfs.mountdir", "/somepath"); - MountClient mountClient = new MountClient(uri.getHost(), mountPort, conf); - MountMNTResponse mountMNTResponse = mountClient.mnt(mountPath); - rootfh = mountMNTResponse.getFilehandle(); - mountClient.shutdown(); + + public void initialize() throws IOException { + // FSINFO and set limits on read/write size + Nfs3Info info = getFilesystemInfo(getRootFileHandle(), fs.getCredentials()); + if(info == null) { + throw new IOException("Could not get filesystem info for uri=" + ep.getUri()); + } + fsInfo = info; }
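[Editor's note] For orientation, a hypothetical two-step bring-up of this store, matching the constructor and initialize() above; the fs, space and ep variables are assumed to already exist, and this fragment omits the IOException handling a real caller needs:

    // The constructor resolves the MOUNT port (via portmap when none is
    // configured), mounts the export, and records the root file handle and
    // fsid; initialize() then issues FSINFO so rtmax/wtmax can bound the
    // sizes used by later READ/WRITE calls.
    NFSv3FileSystemStore store = new NFSv3FileSystemStore(fs, space, ep);
    store.initialize();
    FileHandle root = store.getRootFileHandle();

- - } - public void nullOp() throws IOException { + @Override + public int hashCode() { + int hash = 5; + hash = 61 * hash + Objects.hashCode(this.fs); + hash = 61 * hash + Objects.hashCode(this.space); + hash = 61 * hash + Objects.hashCode(this.fsId); + return hash; + } - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - - // Issue the call - acceptState = service(NFSPROC3.NULL, in, out, new CredentialsNone()); - if(acceptState != AcceptState.SUCCESS) { - LOG.error("NFS NULL resulted in accept state=" + acceptState); - throw new IOException("NFS NULL resulted in accept state=" + acceptState); + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass())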
{ + return false; + } + final NFSv3FileSystemStore other = (NFSv3FileSystemStore) obj; + if (!Objects.equals(this.fs, other.fs)) { + return false; + } + if (!Objects.equals(this.space, other.space)) { + return false; + } + if (!Objects.equals(this.fsId, other.fsId)) { + return false; + } + return true; } - } - - public COMMIT3Response commit(FileHandle file, long offset, int count, - Credentials credentials) throws IOException { - - COMMIT3Response commit3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - // Construct the NFS request - NFSv3RequestBuilder.buildCOMMIT3Request(in, file, offset, count); - // Issue the call - acceptState = service(NFSPROC3.COMMIT, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - commit3Response = NFSv3ResponseBuilder.buildCOMMIT3Response(out.asReadOnlyWrap()); - return commit3Response; - } else { - LOG.error("NFS COMMIT3 resulted in accept state=" + acceptState); - throw new IOException("NFS COMMIT3 resulted in accept state=" + acceptState); + public Endpoint getEndpoint() { + return ep; } - - } - - public CREATE3Response create(FileHandle handle, String name, int mode, - Nfs3SetAttr objAttr, long verf, Credentials credentials) throws IOException { - - CREATE3Response create3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - - // Construct the NFS request - NFSv3RequestBuilder.buildCREATE3Request(in, handle, name, mode, objAttr, verf); - // Issue the call - acceptState = service(NFSPROC3.CREATE, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - create3Response = NFSv3ResponseBuilder.buildCREATE3Response(out.asReadOnlyWrap()); - return create3Response; - } else { - LOG.error("NFS CREATE3 resulted in accept state=" + acceptState); - throw new IOException("NFS CREATE3 resulted in accept state=" + acceptState); + public long getFilesystemId() { + return fsId; } - } - - public FSINFO3Response fsinfo(FileHandle handle, Credentials credentials) throws IOException { - FSINFO3Response fsinfo3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + public FileHandle getRootFileHandle() { + return rootHandle; + } - // Construct the NFS request - NFSv3RequestBuilder.buildFSINFO3Request(in, handle); + public int getReadSizeBits() { + int rtMax = fsInfo.getRtmax(); + int readBlockSizeBits = space.getConfiguration().getNfsReadSizeBits(); + if((rtMax > 0) && ((1 << readBlockSizeBits) > rtMax)) { + readBlockSizeBits = 31 - Integer.numberOfLeadingZeros(rtMax); + } + return readBlockSizeBits; + } - // Issue the call - acceptState = service(NFSPROC3.FSINFO, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - fsinfo3Response = NFSv3ResponseBuilder.buildFSINFO3Response(out.asReadOnlyWrap()); - return fsinfo3Response; - } else { - LOG.error("NFS FSINFO3 resulted in accept state=" + acceptState); - throw new IOException("NFS FSINFO3 resulted in accept state=" + acceptState); + public int getWriteSizeBits() { + int wtMax = fsInfo.getWtmax(); + int writeBlockSizeBits = space.getConfiguration().getNfsWriteSizeBits(); + if((wtMax > 0) && ((1 << writeBlockSizeBits) > wtMax)) { + writeBlockSizeBits = 31 - Integer.numberOfLeadingZeros(wtMax); + } + return writeBlockSizeBits; } - } - - - public GETATTR3Response getattr(FileHandle handle, Credentials credentials) throws IOException { - - GETATTR3Response getattr3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - - // Construct the NFS request - 
NFSv3RequestBuilder.buildGETATTR3Request(in, handle); - // Issue the call - acceptState = service(NFSPROC3.GETATTR, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - getattr3Response = NFSv3ResponseBuilder.buildGETATTR3Response(out.asReadOnlyWrap()); - return getattr3Response; - } else { - LOG.error("NFS GETATTR3 resulted in accept state=" + acceptState); - throw new IOException("NFS GETATTR3 resulted in accept state=" + acceptState); + public int getDirListSize() { + return fsInfo.getDtpref(); } - } - - public LOOKUP3Response lookup(FileHandle dir, String name, - Credentials credentials) throws IOException { - LOOKUP3Response lookup3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + public void nullOp() throws IOException { - // Construct the NFS request - NFSv3RequestBuilder.buildLOOKUP3Request(in, dir, name); - - // Issue the call - acceptState = service(NFSPROC3.LOOKUP, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - lookup3Response = NFSv3ResponseBuilder.buildLOOKUP3Response(out.asReadOnlyWrap()); - return lookup3Response; - } else { - LOG.error("NFS LOOKUP3 resulted in accept state=" + acceptState); - throw new IOException("NFS LOOKUP3 resulted in accept state=" + acceptState); - } - } + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + // Issue the call + acceptState = service(NFSPROC3.NULL, in, out, new CredentialsNone()); + if (acceptState != AcceptState.SUCCESS) { + LOG.error("NFS NULL resulted in accept state=" + acceptState); + throw new IOException("NFS NULL resulted in accept state=" + acceptState); + } + } - public MKDIR3Response mkdir(FileHandle handle, String name, Nfs3SetAttr objAttr, - Credentials credentials) throws IOException { - - MKDIR3Response mkdir3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + public COMMIT3Response commit(FileHandle file, long offset, int count, Credentials credentials) + throws IOException { + + COMMIT3Response commit3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildCOMMIT3Request(in, file, offset, count); + + // Issue the call + acceptState = service(NFSPROC3.COMMIT, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + commit3Response = NFSv3ResponseBuilder.buildCOMMIT3Response(out.asReadOnlyWrap()); + return commit3Response; + } else { + LOG.error("NFS COMMIT3 resulted in accept state=" + acceptState); + throw new IOException("NFS COMMIT3 resulted in accept state=" + acceptState); + } - // Construct the NFS request - NFSv3RequestBuilder.buildMKDIR3Request(in, handle, name, objAttr); - - // Issue the call - acceptState = service(NFSPROC3.MKDIR, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - mkdir3Response = NFSv3ResponseBuilder.buildMKDIR3Response(out.asReadOnlyWrap()); - return mkdir3Response; - } else { - LOG.error("NFS MKDIR3 resulted in accept state=" + acceptState); - throw new IOException("NFS MKDIR3 resulted in accept state=" + acceptState); } - } + public CREATE3Response create(FileHandle handle, String name, int mode, Nfs3SetAttr objAttr, + long verf, Credentials credentials) throws IOException { + + CREATE3Response create3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildCREATE3Request(in, handle, name, mode, objAttr, verf); + + // Issue the call + acceptState = service(NFSPROC3.CREATE, 
in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + create3Response = NFSv3ResponseBuilder.buildCREATE3Response(out.asReadOnlyWrap()); + return create3Response; + } else { + LOG.error("NFS CREATE3 resulted in accept state=" + acceptState); + throw new IOException("NFS CREATE3 resulted in accept state=" + acceptState); + } + } + public FSINFO3Response fsinfo(FileHandle handle, Credentials credentials) throws IOException { - public READ3Response read(FileHandle handle, long offset, int count, - Credentials credentials) throws IOException { - - READ3Response read3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + FSINFO3Response fsinfo3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; - // Construct the NFS request - NFSv3RequestBuilder.buildREAD3Request(in, handle, offset, count); - - // Issue the call - acceptState = service(NFSPROC3.READ, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - read3Response = NFSv3ResponseBuilder.buildREAD3Response(out.asReadOnlyWrap()); - return read3Response; - } else { - LOG.error("NFS READ3 resulted in accept state=" + acceptState); - throw new IOException("NFS READ3 resulted in accept state=" + acceptState); + // Construct the NFS request + NFSv3RequestBuilder.buildFSINFO3Request(in, handle); + + // Issue the call + acceptState = service(NFSPROC3.FSINFO, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + fsinfo3Response = NFSv3ResponseBuilder.buildFSINFO3Response(out.asReadOnlyWrap()); + return fsinfo3Response; + } else { + LOG.error("NFS FSINFO3 resulted in accept state=" + acceptState); + throw new IOException("NFS FSINFO3 resulted in accept state=" + acceptState); + } } - } + public GETATTR3Response getattr(FileHandle handle, Credentials credentials) throws IOException { + GETATTR3Response getattr3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; - public READDIR3Response readdir(FileHandle dir, long cookie, long cookieVerf, - int count, Credentials credentials) throws IOException { - - READDIR3Response readdir3Response; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + // Construct the NFS request + NFSv3RequestBuilder.buildGETATTR3Request(in, handle); - // Construct the NFS request - NFSv3RequestBuilder.buildREADDIR3Request(in, dir, cookie, cookieVerf, count); - - // Issue the call - acceptState = service(NFSPROC3.READDIR, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - readdir3Response = NFSv3ResponseBuilder.buildREADDIR3Response(out.asReadOnlyWrap()); - return readdir3Response; - } else { - LOG.error("NFS READDIR3 resulted in accept state=" + acceptState); - throw new IOException("NFS READDIR3 resulted in accept state=" + acceptState); + // Issue the call + acceptState = service(NFSPROC3.GETATTR, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + getattr3Response = NFSv3ResponseBuilder.buildGETATTR3Response(out.asReadOnlyWrap()); + return getattr3Response; + } else { + LOG.error("NFS GETATTR3 resulted in accept state=" + acceptState); + throw new IOException("NFS GETATTR3 resulted in accept state=" + acceptState); + } } - } - - public REMOVE3Response remove(FileHandle dir, String name, - Credentials credentials) throws IOException { - - REMOVE3Response remove3Response = null; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - // Construct the NFS request - NFSv3RequestBuilder.buildREMOVE3Request(in, dir, name); - - // 
Issue the call - acceptState = service(NFSPROC3.REMOVE, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - remove3Response = NFSv3ResponseBuilder.buildREMOVE3Response(out.asReadOnlyWrap()); - return remove3Response; - } else { - LOG.error("NFS REMOVE3 resulted in accept state=" + acceptState); - throw new IOException("NFS REMOVE3 resulted in accept state=" + acceptState); + public LOOKUP3Response lookup(FileHandle dir, String name, Credentials credentials) + throws IOException { + + LOOKUP3Response lookup3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildLOOKUP3Request(in, dir, name); + + // Issue the call + acceptState = service(NFSPROC3.LOOKUP, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + lookup3Response = NFSv3ResponseBuilder.buildLOOKUP3Response(out.asReadOnlyWrap()); + return lookup3Response; + } else { + LOG.error("NFS LOOKUP3 resulted in accept state=" + acceptState); + throw new IOException("NFS LOOKUP3 resulted in accept state=" + acceptState); + } } - } - - - public RENAME3Response rename(FileHandle fromDir, String fromName, - FileHandle toDir, String toName, Credentials credentials) throws IOException { - - RENAME3Response rename3Response = null; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - - // Construct the NFS request - NFSv3RequestBuilder.buildRENAME3Request(in, fromDir, fromName, toDir, toName); - - // Issue the call - acceptState = service(NFSPROC3.RENAME, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - rename3Response = NFSv3ResponseBuilder.buildRENAME3Response(out.asReadOnlyWrap()); - return rename3Response; - } else { - LOG.error("NFS RENAME3 resulted in accept state=" + acceptState); - throw new IOException("NFS RENAME3 resulted in accept state=" + acceptState); + public MKDIR3Response mkdir(FileHandle handle, String name, Nfs3SetAttr objAttr, + Credentials credentials) throws IOException { + + MKDIR3Response mkdir3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildMKDIR3Request(in, handle, name, objAttr); + + // Issue the call + acceptState = service(NFSPROC3.MKDIR, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + mkdir3Response = NFSv3ResponseBuilder.buildMKDIR3Response(out.asReadOnlyWrap()); + return mkdir3Response; + } else { + LOG.error("NFS MKDIR3 resulted in accept state=" + acceptState); + throw new IOException("NFS MKDIR3 resulted in accept state=" + acceptState); + } } - } - + public READ3Response read(FileHandle handle, long offset, int count, Credentials credentials) + throws IOException { + + READ3Response read3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildREAD3Request(in, handle, offset, count); + + // Issue the call + acceptState = service(NFSPROC3.READ, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + read3Response = NFSv3ResponseBuilder.buildREAD3Response(out.asReadOnlyWrap()); + return read3Response; + } else { + LOG.error("NFS READ3 resulted in accept state=" + acceptState); + throw new IOException("NFS READ3 resulted in accept state=" + acceptState); + } + } - public RMDIR3Response rmdir(FileHandle dir, String name, - Credentials credentials) throws IOException { - - RMDIR3Response rmdir3Response = null; - XDR in = new XDR(); - XDR out = new XDR(); 
- AcceptState acceptState; + public READDIR3Response readdir(FileHandle dir, long cookie, long cookieVerf, int count, + Credentials credentials) throws IOException { + + READDIR3Response readdir3Response; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildREADDIR3Request(in, dir, cookie, cookieVerf, count); + + // Issue the call + acceptState = service(NFSPROC3.READDIR, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + readdir3Response = NFSv3ResponseBuilder.buildREADDIR3Response(out.asReadOnlyWrap()); + return readdir3Response; + } else { + LOG.error("NFS READDIR3 resulted in accept state=" + acceptState); + throw new IOException("NFS READDIR3 resulted in accept state=" + acceptState); + } + } - // Construct the NFS request - NFSv3RequestBuilder.buildRMDIR3Request(in, dir, name); - - // Issue the call - acceptState = service(NFSPROC3.RMDIR, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - rmdir3Response = NFSv3ResponseBuilder.buildRMDIR3Response(out.asReadOnlyWrap()); - return rmdir3Response; - } else { - LOG.error("NFS RMDIR3 resulted in accept state=" + acceptState); - throw new IOException("NFS RMDIR3 resulted in accept state=" + acceptState); + public REMOVE3Response remove(FileHandle dir, String name, Credentials credentials) + throws IOException { + + REMOVE3Response remove3Response = null; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildREMOVE3Request(in, dir, name); + + // Issue the call + acceptState = service(NFSPROC3.REMOVE, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + remove3Response = NFSv3ResponseBuilder.buildREMOVE3Response(out.asReadOnlyWrap()); + return remove3Response; + } else { + LOG.error("NFS REMOVE3 resulted in accept state=" + acceptState); + throw new IOException("NFS REMOVE3 resulted in accept state=" + acceptState); + } } - } - - public SETATTR3Response setattr(FileHandle handle, Nfs3SetAttr attr, - Boolean check, NfsTime ctime, Credentials credentials) throws IOException { - - SETATTR3Response setattr3Response = null; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - // Construct the NFS request - NFSv3RequestBuilder.buildSETATTR3Request(in, handle, attr, check, ctime); - - // Issue the call - acceptState = service(NFSPROC3.SETATTR, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - setattr3Response = NFSv3ResponseBuilder.buildSETATTR3Response(out.asReadOnlyWrap()); - return setattr3Response; - } else { - LOG.error("NFS SETATTR3 resulted in accept state=" + acceptState); - throw new IOException("NFS SETATTR3 resulted in accept state=" + acceptState); + public RENAME3Response rename(FileHandle fromDir, String fromName, FileHandle toDir, + String toName, Credentials credentials) throws IOException { + + RENAME3Response rename3Response = null; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildRENAME3Request(in, fromDir, fromName, toDir, toName); + + // Issue the call + long start = System.currentTimeMillis(); + acceptState = service(NFSPROC3.RENAME, in, out, credentials); + LOG.info("RPC RENAME took " + (System.currentTimeMillis() - start) + " ms"); + if (acceptState == AcceptState.SUCCESS) { + rename3Response = NFSv3ResponseBuilder.buildRENAME3Response(out.asReadOnlyWrap()); + return rename3Response; + } else { + 
LOG.error("NFS RENAME3 resulted in accept state=" + acceptState); + throw new IOException("NFS RENAME3 resulted in accept state=" + acceptState); + } } - } - - public WRITE3Response write(FileHandle file, long offset, int count, - WriteStableHow stableHow, byte[] data, Credentials credentials) throws IOException { - - WRITE3Response write3Response = null; - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - // Construct the NFS request - NFSv3RequestBuilder.buildWRITE3Request(in, file, offset, count, stableHow, data); - - // Issue the call - acceptState = service(NFSPROC3.WRITE, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - write3Response = NFSv3ResponseBuilder.buildWRITE3Response(out.asReadOnlyWrap()); - return write3Response; - } else { - LOG.error("NFS WRITE3 resulted in accept state=" + acceptState); - throw new IOException("NFS WRITE3 resulted in accept state=" + acceptState); + public RMDIR3Response rmdir(FileHandle dir, String name, Credentials credentials) + throws IOException { + + RMDIR3Response rmdir3Response = null; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildRMDIR3Request(in, dir, name); + + // Issue the call + acceptState = service(NFSPROC3.RMDIR, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + rmdir3Response = NFSv3ResponseBuilder.buildRMDIR3Response(out.asReadOnlyWrap()); + return rmdir3Response; + } else { + LOG.error("NFS RMDIR3 resulted in accept state=" + acceptState); + throw new IOException("NFS RMDIR3 resulted in accept state=" + acceptState); + } } - } - - public Nfs3FileAttributes getFileAttributes(FileHandle handle, Credentials credentials) throws IOException { - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + public SETATTR3Response setattr(FileHandle handle, Nfs3SetAttr attr, Boolean check, + NfsTime ctime, Credentials credentials) throws IOException { + + SETATTR3Response setattr3Response = null; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildSETATTR3Request(in, handle, attr, check, ctime); + + // Issue the call + acceptState = service(NFSPROC3.SETATTR, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + setattr3Response = NFSv3ResponseBuilder.buildSETATTR3Response(out.asReadOnlyWrap()); + return setattr3Response; + } else { + LOG.error("NFS SETATTR3 resulted in accept state=" + acceptState); + throw new IOException("NFS SETATTR3 resulted in accept state=" + acceptState); + } + } - // Construct the NFS request - NFSv3RequestBuilder.buildGETATTR3Request(in, handle); - - // Issue the call - acceptState = service(NFSPROC3.GETATTR, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - XDR buffer = out.asReadOnlyWrap(); - int status = buffer.readInt(); - if(status == Nfs3Status.NFS3_OK) { - Nfs3FileAttributes attrs = Nfs3FileAttributes.deserialize(buffer); - return attrs; - } else { - LOG.error("NFS GetFileAttributes resulted in status=" + status); - throw new IOException("NFS GetFileAttributes resulted in status=" + status); - } - } else { - LOG.error("NFS GetFileAttributes resulted in accept state=" + acceptState); - throw new IOException("NFS GetFileAttributes resulted in accept state=" + acceptState); + public WRITE3Response write(FileHandle file, long offset, int count, WriteStableHow stableHow, + byte[] data, Credentials credentials) throws IOException { + + WRITE3Response 
write3Response = null; + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildWRITE3Request(in, file, offset, count, stableHow, data); + + // Issue the call + acceptState = service(NFSPROC3.WRITE, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + write3Response = NFSv3ResponseBuilder.buildWRITE3Response(out.asReadOnlyWrap()); + return write3Response; + } else { + LOG.error("NFS WRITE3 resulted in accept state=" + acceptState); + throw new IOException("NFS WRITE3 resulted in accept state=" + acceptState); + } } - } - - public FileHandle getFileHandle(FileHandle directory, String filename, Credentials credentials) throws IOException { - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + public Nfs3FileAttributes getFileAttributes(FileHandle handle, Credentials credentials) + throws IOException { + + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildGETATTR3Request(in, handle); + + // Issue the call + acceptState = service(NFSPROC3.GETATTR, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + XDR buffer = out.asReadOnlyWrap(); + int status = buffer.readInt(); + if (status == Nfs3Status.NFS3_OK) { + Nfs3FileAttributes attrs = Nfs3FileAttributes.deserialize(buffer); + return attrs; + } else { + LOG.error("NFS GetFileAttributes resulted in status=" + status); + throw new IOException("NFS GetFileAttributes resulted in status=" + status); + } + } else { + LOG.error("NFS GetFileAttributes resulted in accept state=" + acceptState); + throw new IOException("NFS GetFileAttributes resulted in accept state=" + acceptState); + } + } - // Construct the NFS request - NFSv3RequestBuilder.buildLOOKUP3Request(in, directory, filename); - - // Issue the call - acceptState = service(NFSPROC3.LOOKUP, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - XDR xdr = out.asReadOnlyWrap(); - int status = xdr.readInt(); - if(status == Nfs3Status.NFS3_OK) { - FileHandle fileHandle = new Nfs3FileHandle(); - fileHandle.deserialize(xdr); - return fileHandle; - } else { - return null; - /*throw new IOException("NFS could not get file handle");*/ - } - } else { - LOG.error("NFS LOOKUP3 resulted in accept state=" + acceptState); - throw new IOException("NFS LOOKUP3 resulted in accept state=" + acceptState); + public FileHandle getFileHandle(FileHandle directory, String filename, Credentials credentials) + throws IOException { + + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildLOOKUP3Request(in, directory, filename); + + // Issue the call + acceptState = service(NFSPROC3.LOOKUP, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + XDR xdr = out.asReadOnlyWrap(); + int status = xdr.readInt(); + if (status == Nfs3Status.NFS3_OK) { + FileHandle fileHandle = new Nfs3FileHandle(); + fileHandle.deserialize(xdr); + return fileHandle; + } else { + return null; + /* throw new IOException("NFS could not get file handle"); */ + } + } else { + LOG.error("NFS LOOKUP3 resulted in accept state=" + acceptState); + throw new IOException("NFS LOOKUP3 resulted in accept state=" + acceptState); + } } - } - - public Nfs3Info getFilesystemInfo(FileHandle handle, Credentials credentials) throws IOException { - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; - - // Construct the NFS request - 
NFSv3RequestBuilder.buildFSINFO3Request(in, handle); - - // Issue the call - acceptState = service(NFSPROC3.FSINFO, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - return NFSv3ResponseBuilder.buildFilesystemInfo(out.asReadOnlyWrap()); - } else { - LOG.error("NFS FSINFO3 resulted in accept state=" + acceptState); - throw new IOException("NFS FSINFO3 resulted in accept state=" + acceptState); + + public Nfs3Info getFilesystemInfo(FileHandle handle, Credentials credentials) throws IOException { + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildFSINFO3Request(in, handle); + + // Issue the call + acceptState = service(NFSPROC3.FSINFO, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + return NFSv3ResponseBuilder.buildFilesystemInfo(out.asReadOnlyWrap()); + } else { + LOG.error("NFS FSINFO3 resulted in accept state=" + acceptState); + throw new IOException("NFS FSINFO3 resulted in accept state=" + acceptState); + } } - } - - public Nfs3DirList getDirectoryList(FileHandle dir, long cookie, long cookieVerf, - int count, Credentials credentials) throws IOException { - XDR in = new XDR(); - XDR out = new XDR(); - AcceptState acceptState; + public Nfs3DirList getDirectoryList(FileHandle dir, long cookie, long cookieVerf, int count, + Credentials credentials) throws IOException { - // Construct the NFS request - NFSv3RequestBuilder.buildREADDIR3Request(in, dir, cookie, cookieVerf, count); - - // Issue the call - acceptState = service(NFSPROC3.READDIR, in, out, credentials); - if(acceptState == AcceptState.SUCCESS) { - return NFSv3ResponseBuilder.buildDirectoryList(out.asReadOnlyWrap()); - } else { - LOG.error("NFS READDIR3 resulted in accept state=" + acceptState); - throw new IOException("NFS READDIR3 resulted in accept state=" + acceptState); + XDR in = new XDR(); + XDR out = new XDR(); + AcceptState acceptState; + + // Construct the NFS request + NFSv3RequestBuilder.buildREADDIR3Request(in, dir, cookie, cookieVerf, count); + + // Issue the call + acceptState = service(NFSPROC3.READDIR, in, out, credentials); + if (acceptState == AcceptState.SUCCESS) { + return NFSv3ResponseBuilder.buildDirectoryList(out.asReadOnlyWrap()); + } else { + LOG.error("NFS READDIR3 resulted in accept state=" + acceptState); + throw new IOException("NFS READDIR3 resulted in accept state=" + acceptState); + } } - } - - private AcceptState service(NFSPROC3 procedure, XDR in, XDR out, Credentials credentials) throws IOException { - - // Make the NFS request - try { - RpcMessage reply; - reply = service(Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, procedure.getValue(), in, out, credentials); - RpcAcceptedReply accepted = (RpcAcceptedReply) reply; - return accepted.getAcceptState(); - } catch(RpcException exception) { - LOG.error("Got a RPC exception"); - exception.printStackTrace(); - throw new IOException(exception.getCause()); + + private AcceptState service(NFSPROC3 procedure, XDR in, XDR out, Credentials credentials) + throws IOException { + + // Make the NFS request + try { + RpcMessage reply; + reply + = service(Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, procedure.getValue(), in, out, + credentials); + RpcAcceptedReply accepted = (RpcAcceptedReply) reply; + return accepted.getAcceptState(); + } catch (RpcException exception) { + LOG.error("Got a RPC exception", exception); + throw new IOException("Got a RPC exception", exception); + } } - } - + }
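[Editor's note] Every public operation above follows the same XDR round-trip through the private service() helper; as a caller-side sketch, fetching attributes for the root handle looks roughly like this (the store and credentials variables are assumed, e.g. obtained from the owning filesystem):

    GETATTR3Response response = store.getattr(store.getRootFileHandle(), credentials);
    if (response.getStatus() != Nfs3Status.NFS3_OK) {
        throw new IOException("GETATTR failed with status " + response.getStatus());
    }

diff --git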
a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3ResponseBuilder.java b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3ResponseBuilder.java index 8014893..26d79bb 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/NFSv3ResponseBuilder.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/NFSv3ResponseBuilder.java @@ -371,7 +371,6 @@ public static Nfs3FileAttributes buildPostOpAttributes(XDR xdr) throws IOExcepti public static List<Entry3> buildDirEntryList(XDR xdr) throws IOException { List<Entry3> entries = new LinkedList<Entry3>(); boolean haveMore = xdr.readBoolean(); - System.out.println("Has list of entries=" + haveMore); while(haveMore) { long fileId = xdr.readHyper(); byte[] name = xdr.readVariableOpaque(); @@ -379,7 +378,6 @@ public static List<Entry3> buildDirEntryList(XDR xdr) throws IOException { Entry3 entry = new Entry3(fileId, new String(name), cookie); entries.add(entry); haveMore = xdr.readBoolean(); - System.out.println("haveMore=" + haveMore); } return entries; } diff --git a/src/main/java/org/apache/hadoop/fs/nfs/mount/MountClient.java b/src/main/java/org/apache/hadoop/fs/nfs/mount/MountClient.java index eaca046..d0b60ef 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/mount/MountClient.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/mount/MountClient.java @@ -21,8 +21,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.nfs.rpc.RpcClient; import org.apache.hadoop.fs.nfs.rpc.RpcException; +import org.apache.hadoop.fs.nfs.topology.Namespace; +import org.apache.hadoop.fs.nfs.topology.NamespaceOptions; import org.apache.hadoop.mount.MountInterface; import org.apache.hadoop.oncrpc.RpcAcceptedReply; +import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; import org.apache.hadoop.oncrpc.RpcMessage; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.Credentials; @@ -37,28 +40,28 @@ public class MountClient extends RpcClient { public final static Log LOG = LogFactory.getLog(MountClient.class); private final Credentials credentials; - public MountClient(String host, int port, Configuration conf) { + public MountClient(Namespace space, String host, int port) throws IOException { super(host, port); - // By default, we use AUTH_NONE for mount protocol, unless users specify - AUTH_SYS in the configuration file.
- if (conf != null) { - String credentialsFlavor = conf.get("fs.nfs.mount.auth.flavor", null); - - if (credentialsFlavor != null - && (credentialsFlavor.equalsIgnoreCase("AUTH_SYS") || credentialsFlavor - .equalsIgnoreCase("AUTH_UNIX"))) { + // Check namespace for authentication scheme + if(space == null || space.getConfiguration() == null) { + throw new IOException("No namespace given"); + } + + NamespaceOptions options = space.getConfiguration(); + String authScheme = options.getNfsAuthScheme(); + + if(authScheme != null && (authScheme.equalsIgnoreCase("AUTH_SYS") || authScheme.equalsIgnoreCase("AUTH_UNIX"))) { CredentialsSys sys = new CredentialsSys(); - sys.setUID(0); - sys.setGID(0); + sys.setUID(options.getNfsUid()); + sys.setGID(options.getNfsGid()); sys.setStamp(new Long(System.currentTimeMillis()).intValue()); credentials = sys; - } else { - credentials = new CredentialsNone(); - } - } else { - credentials = new CredentialsNone(); } + // Use AUTH_NONE by default + else { + credentials = new CredentialsNone(); + } } public MountMNTResponse mnt(String path) throws IOException { @@ -79,9 +82,14 @@ public MountMNTResponse mnt(String path) throws IOException { if (reply instanceof RpcAcceptedReply) { RpcAcceptedReply accepted = (RpcAcceptedReply) reply; - LOG.debug("Mount MNT operation acceptState=" + accepted.getAcceptState()); - mountMNTResponse = new MountMNTResponse(out.asReadOnlyWrap()); - return mountMNTResponse; + if(accepted.getAcceptState().equals(AcceptState.SUCCESS)) { + LOG.debug("Mount MNT operation acceptState=" + accepted.getAcceptState()); + mountMNTResponse = new MountMNTResponse(out.asReadOnlyWrap()); + return mountMNTResponse; + } else { + LOG.error("Could not mount filesystem"); + throw new IOException("Could not mount filesystem. Got status " + accepted.getAcceptState()); + } } else { LOG.error("Mount MNT operation was not accepted"); throw new IOException("Mount MNT operation was not accepted"); diff --git a/src/main/java/org/apache/hadoop/fs/nfs/portmap/PortmapClient.java b/src/main/java/org/apache/hadoop/fs/nfs/portmap/PortmapClient.java index 92f7c95..16b390a 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/portmap/PortmapClient.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/portmap/PortmapClient.java @@ -39,7 +39,7 @@ public class PortmapClient extends RpcClient { public final static Log LOG = LogFactory.getLog(PortmapClient.class); - public PortmapClient(String host, int port) { + public PortmapClient(String host, int port) throws IOException { super(host, port); } diff --git a/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClient.java b/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClient.java index 578f7ea..ae05295 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClient.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClient.java @@ -11,14 +11,15 @@ * or implied. See the License for the specific language governing permissions and limitations under * the License. 
*/ - package org.apache.hadoop.fs.nfs.rpc; - +import java.io.IOException; import java.net.InetSocketAddress; import java.util.Map; +import java.util.Queue; import java.util.Random; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -49,31 +50,35 @@ public class RpcClient { final ClientBootstrap bootstrap; final Map<Integer, RpcNetworkTask> tasks; + final Queue<RpcNetworkTask> pending; final AtomicBoolean errored; final AtomicBoolean shutdown; final AtomicInteger xid; + final RpcClient client; ChannelFuture future; - public static final int RECONNECT_DELAY_MS = 5000; + public static final int RECONNECT_DELAY_MS = 5; public static final int MAX_RETRIES = 10; - public static final int MAX_RPCWAIT_MS = 10000; + public static final int MAX_RPCWAIT_MS = 60000; + + public static final Timer timer = new HashedWheelTimer(); public static final Log LOG = LogFactory.getLog(RpcClient.class); - public RpcClient(String hostname, int port) { + public RpcClient(String hostname, int port) throws IOException { - tasks = new ConcurrentHashMap(); + tasks = new ConcurrentHashMap<>(); + pending = new ConcurrentLinkedQueue<>(); xid = new AtomicInteger(new Random(System.currentTimeMillis()).nextInt(1024) * 1000000); errored = new AtomicBoolean(false); shutdown = new AtomicBoolean(false); - final Timer timer = new HashedWheelTimer(); ChannelFactory factory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), - Executors.newCachedThreadPool(), 1, 1); + Executors.newCachedThreadPool(), 1, 8); - final RpcClient client = this; + client = this; ChannelPipelineFactory pipelineFactory = new ChannelPipelineFactory() { @Override public ChannelPipeline getPipeline() { @@ -86,13 +91,16 @@ public ChannelPipeline getPipeline() { bootstrap.setOption("remoteAddress", new InetSocketAddress(hostname, port)); bootstrap.setOption("tcpNoDelay", true); - bootstrap.setOption("keepAlive", true); + bootstrap.setOption("keepAlive", false); bootstrap.setOption("soLinger", 0); - bootstrap.setOption("receiveBufferSize", 16 * 1024 * 1024); - bootstrap.setOption("sendBufferSize", 16 * 1024 * 1024); + bootstrap.setOption("receiveBufferSize", 32 * 1024 * 1024); + bootstrap.setOption("sendBufferSize", 32 * 1024 * 1024); future = bootstrap.connect(); - + future.awaitUninterruptibly(); + if(future.isDone() && (future.isCancelled() || !future.isSuccess())) { + throw new IOException("Could not connect to " + hostname + " on port " + port); + } } public RpcMessage service(int program, int version, int procedure, XDR in, XDR out, @@ -108,24 +116,31 @@ public RpcMessage service(int program, int version, int procedure, XDR in, XDR o ChannelBuffer buf = XDR.writeMessageTcp(request, true); RpcNetworkTask task = new RpcNetworkTask(callXid, buf); - // Issue it and signal - synchronized (tasks) { - tasks.put(callXid, task); - } - task.signal(); + // Issue the task + tasks.put(callXid, task); + pending.add(task); + sendToChannel(); // Wait for task to complete + boolean completed = false; for (int i = 0; i < MAX_RETRIES; ++i) { if (task.wait(MAX_RPCWAIT_MS)) { - if (i > 0) { - LOG.debug("RPC: Call xid=" + task.getXid() + " completed with " + i + " retries"); - // LOG.debug("exiting abruptly!!!!"); - // System.exit(-1); - } + completed = true; break; + } else { + LOG.info("RPC: xid=" + callXid + " took too long, so retrying"); + task = new RpcNetworkTask(callXid, buf); + tasks.put(callXid, task); +
pending.add(task); + sendToChannel(); } } + if (!completed || task.getReply() == null) { + LOG.error("RPC: xid=" + callXid + " timed out"); + throw new RpcException("RPC: xid=" + callXid + " timed out"); + } + // Process reply and return RpcReply reply = task.getReply(); if (reply.getState() == RpcReply.ReplyState.MSG_DENIED) { @@ -133,6 +148,7 @@ public RpcMessage service(int program, int version, int procedure, XDR in, XDR o throw new RpcException("RPC: xid=" + callXid + " RpcReply request denied: " + reply); } + // Call was accepted so process the correct reply RpcAcceptedReply acceptedReply = (RpcAcceptedReply) reply; LOG.debug("RPC: xid=" + callXid + " completed successfully with acceptstate=" + acceptedReply.getAcceptState()); @@ -143,7 +159,7 @@ public RpcMessage service(int program, int version, int procedure, XDR in, XDR o } public void shutdown() { - LOG.info("Shutting down"); + long start = System.currentTimeMillis(); try { shutdown.set(true); future.getChannel().close(); @@ -151,6 +167,7 @@ public void shutdown() { bootstrap.shutdown(); } finally { bootstrap.releaseExternalResources(); + LOG.debug("RpcClient shutdown took " + (System.currentTimeMillis() - start) + " ms"); } } @@ -163,30 +180,25 @@ protected synchronized void setChannel(ChannelFuture future) { } protected RpcNetworkTask getTask() { - RpcNetworkTask task = null; - synchronized (tasks) { - for (RpcNetworkTask t : tasks.values()) { - if ((System.currentTimeMillis() - t.getLastEnqueuedTime()) > 10000) { - task = t; - task.setEnqueueTime(System.currentTimeMillis()); - break; - } - } - } - return task; + return pending.poll(); } protected void completeTask(int xid, RpcReply reply, XDR replyData) { - synchronized (tasks) { - if (tasks.containsKey(xid)) { - RpcNetworkTask found = tasks.remove(xid); - found.setReply(reply, replyData); - found.signal(); - LOG.debug("RPC: Call finished for xid=" + xid); - } else { - LOG.error("RPC: Could not find original call for xid=" + xid); - errored.set(true); + RpcNetworkTask found = tasks.remove(xid); + if (found != null) { + found.setReply(reply, replyData); + found.signal(); + } + } + + protected void sendToChannel() { + try { + RpcNetworkTask task = getTask(); + if (task != null) { + future.getChannel().write(task.getCallData()); } + } catch (Exception ignore) { + } } diff --git a/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClientHandler.java b/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClientHandler.java index a837917..29fa5ba 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClientHandler.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcClientHandler.java @@ -1,17 +1,18 @@ /** * Copyright 2014 NetApp Inc. All Rights Reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under * the License. */ - package org.apache.hadoop.fs.nfs.rpc; import java.util.concurrent.TimeUnit; @@ -37,72 +38,72 @@ public class RpcClientHandler extends IdleStateAwareChannelHandler { - final RpcClient client; - final ClientBootstrap bootstrap; - final Timer timer; - - public static final Log LOG = LogFactory.getLog(RpcClientHandler.class); - - public RpcClientHandler(RpcClient client, ClientBootstrap bootstrap, Timer timer) { - this.client = client; - this.bootstrap = bootstrap; - this.timer = timer; - } - - @Override - public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent event) { - sendNextMessage(ctx, event); - } - - @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent event) { - ChannelBuffer buf = (ChannelBuffer) event.getMessage(); - XDR replyxdr = new XDR(buf.toByteBuffer().asReadOnlyBuffer(), State.READING); - RpcReply rpcreply = RpcReply.read(replyxdr); - client.completeTask(rpcreply.getXid(), rpcreply, replyxdr); - sendNextMessage(ctx, event); - } - - @Override - public void writeComplete(ChannelHandlerContext ctx, WriteCompletionEvent event) { - sendNextMessage(ctx, event); - } - - @Override - public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent event) { - sendNextMessage(ctx, event); - } - - @Override - public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent event) { - LOG.info("RPC: channel was disconnected"); - } - - @Override - public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent event) { - timer.newTimeout(new TimerTask() { - public void run(Timeout timeout) { - if(!client.hasShutdown()) { - LOG.info("RPC: channel was closed. 
Trying to reconnect"); - client.setChannel(bootstrap.connect()); + final RpcClient client; + final ClientBootstrap bootstrap; + final Timer timer; + + public static final Log LOG = LogFactory.getLog(RpcClientHandler.class); + + public RpcClientHandler(RpcClient client, ClientBootstrap bootstrap, Timer timer) { + this.client = client; + this.bootstrap = bootstrap; + this.timer = timer; + } + + @Override + public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent event) { + sendNextMessage(ctx, event); + } + + @Override + public void messageReceived(ChannelHandlerContext ctx, MessageEvent event) { + ChannelBuffer buf = (ChannelBuffer) event.getMessage(); + XDR replyxdr = new XDR(buf.toByteBuffer().asReadOnlyBuffer(), State.READING); + RpcReply rpcreply = RpcReply.read(replyxdr); + client.completeTask(rpcreply.getXid(), rpcreply, replyxdr); + sendNextMessage(ctx, event); + } + + @Override + public void writeComplete(ChannelHandlerContext ctx, WriteCompletionEvent event) { + sendNextMessage(ctx, event); + } + + @Override + public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent event) { + sendNextMessage(ctx, event); + } + + @Override + public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent event) { + LOG.debug("RPC: channel was disconnected"); + } + + @Override + public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent event) { + timer.newTimeout(new TimerTask() { + public void run(Timeout timeout) { + if (!client.hasShutdown()) { + LOG.debug("RPC: channel was closed. Trying to reconnect"); + client.setChannel(bootstrap.connect()); + } + } + }, RpcClient.RECONNECT_DELAY_MS, TimeUnit.MILLISECONDS); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent event) { + LOG.error("RPC: Got an exception", event.getCause()); + } + + protected void sendNextMessage(ChannelHandlerContext ctx, ChannelEvent event) { + RpcNetworkTask task = client.getTask(); + if (task != null) { + if (event.getChannel().isConnected()) { + LOG.debug("Send call with xid=" + task.xid); + event.getChannel().write(task.getCallData()); + } } - } - }, RpcClient.RECONNECT_DELAY_MS, TimeUnit.MILLISECONDS); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent event) { - LOG.error("RPC: Got an exception", event.getCause()); - } - - protected void sendNextMessage(ChannelHandlerContext ctx, ChannelEvent event) { - RpcNetworkTask task = client.getTask(); - if(task != null) { - if(event.getChannel().isConnected()) { - LOG.debug("Send call with xid=" + task.xid); - event.getChannel().write(task.getCallData()); - } } - } - + } diff --git a/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcNetworkTask.java b/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcNetworkTask.java index 19fb963..e6521be 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcNetworkTask.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/rpc/RpcNetworkTask.java @@ -17,6 +17,9 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + import org.apache.hadoop.oncrpc.RpcReply; import org.apache.hadoop.oncrpc.XDR; import org.jboss.netty.buffer.ChannelBuffer; @@ -31,13 +34,12 @@ class RpcNetworkTask { RpcReply reply; XDR replyData; - long lastEnqueuedTime; + public static final Log LOG = LogFactory.getLog(RpcNetworkTask.class); public RpcNetworkTask(Integer xid, ChannelBuffer callData) { this.xid = xid; this.callData = callData; - 
this.lastEnqueuedTime = 0; - countdown = new CountDownLatch(2); + countdown = new CountDownLatch(1); } public int getXid() { @@ -61,19 +63,12 @@ public XDR getReplyData() { return replyData; } - public void setEnqueueTime(long time) { - this.lastEnqueuedTime = time; - } - - public long getLastEnqueuedTime() { - return this.lastEnqueuedTime; - } - public boolean wait(int millis) { while(true) { try { return countdown.await(millis, TimeUnit.MILLISECONDS); } catch(InterruptedException exception) { + LOG.info("Thread got interrupted while waiting for task xid=" + xid + " to finish"); continue; } } diff --git a/src/main/java/org/apache/hadoop/fs/nfs/stream/Commit.java b/src/main/java/org/apache/hadoop/fs/nfs/stream/Commit.java index 7c8e2a8..e9a29ef 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/stream/Commit.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/stream/Commit.java @@ -21,20 +21,23 @@ import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; +import org.apache.hadoop.oncrpc.security.Credentials; public class Commit implements Callable { final NFSv3FileSystemStore store; final FileHandle handle; + final Credentials credentials; - public Commit(NFSBufferedOutputStream stream, NFSv3FileSystemStore store, FileHandle handle, Long offset, Integer length) { + public Commit(NFSBufferedOutputStream stream, NFSv3FileSystemStore store, FileHandle handle, Credentials credentials, Long offset, Integer length) { this.store = store; this.handle = handle; + this.credentials = credentials; } @Override public Commit call() throws Exception { - COMMIT3Response response = store.commit(handle, 0L, 0, store.getCredentials()); + COMMIT3Response response = store.commit(handle, 0L, 0, credentials); int status = response.getStatus(); if (status != Nfs3Status.NFS3_OK) { throw new IOException("Commit error: status=" + status); diff --git a/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedInputStream.java b/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedInputStream.java index f7747c1..7b3d811 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedInputStream.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedInputStream.java @@ -1,19 +1,21 @@ /** * Copyright 2014 NetApp Inc. All Rights Reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under * the License. 
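The switch above from CountDownLatch(2) to CountDownLatch(1) makes task completion a one-shot handshake: the handler thread counts the latch down once when the reply carrying the matching xid arrives, and the waiter unblocks. A minimal caller-side sketch, assuming the surrounding RpcClient queues the task for the channel handler and that completeTask() releases the latch; queueTask() and getReply() are hypothetical names for members not shown in this patch:

    // Hypothetical caller-side flow; queueTask() and getReply() are assumed helpers.
    RpcNetworkTask task = new RpcNetworkTask(xid, callData);
    client.queueTask(task);           // sendNextMessage() in the handler picks it up
    while (!task.wait(1000)) {
        // the bounded wait timed out without a reply; keep waiting
    }
    RpcReply reply = task.getReply(); // set by client.completeTask(xid, reply, xdr)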
*/ - package org.apache.hadoop.fs.nfs.stream; +import java.io.EOFException; import java.io.IOException; import java.util.Iterator; import java.util.Map; @@ -37,6 +39,7 @@ import org.apache.hadoop.fs.nfs.StreamStatistics; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; +import org.apache.hadoop.oncrpc.security.Credentials; public class NFSBufferedInputStream extends FSInputStream { @@ -47,12 +50,12 @@ public class NFSBufferedInputStream extends FSInputStream { final NFSv3FileSystemStore store; final FileHandle handle; + final Credentials credentials; final String pathString; final int readBlockSizeBits; final long splitSize; final StreamStatistics statistics; - final int poolSize; final boolean doPrefetch; final AtomicBoolean closed; final ExecutorService executors; @@ -61,14 +64,13 @@ public class NFSBufferedInputStream extends FSInputStream { static final AtomicInteger streamId; - public static final int DEFAULT_CACHE_SIZE_IN_BLOCKS = 512; + public static final int DEFAULT_CACHE_SIZE_IN_BLOCKS = 1024; - public static final int MIN_PREFETCH_POOL_SIZE = 1; - public static final int MAX_PREFETCH_POOL_SIZE = 512; - public static final int DEFAULT_PREFETCH_POOL_SIZE = 128; + public static final int MAX_PREFETCH_POOL_SIZE = 256; + public static final int DEFAULT_PREFETCH_POOL_SIZE = 4; public static final boolean DEFAULT_PREFETCH_ENABLED = true; - public static final int DEFAULT_READAHEAD_SIZE = 128; + public static final int DEFAULT_READAHEAD_SIZE = 256; public final static Log LOG = LogFactory.getLog(NFSBufferedInputStream.class); @@ -76,40 +78,35 @@ public class NFSBufferedInputStream extends FSInputStream { streamId = new AtomicInteger(1); } - public NFSBufferedInputStream(Configuration configuration, NFSv3FileSystemStore store, - FileHandle handle, Path f, Configuration conf, int readBlockSizeBits, String scheme, - long splitSize, FileSystem.Statistics fsStat) throws IOException { + public NFSBufferedInputStream(NFSv3FileSystemStore store, + FileHandle handle, Path f, Configuration conf, + long splitSize, Credentials credentials, FileSystem.Statistics fsStat) throws IOException { this.store = store; this.handle = handle; + this.credentials = credentials; this.pathString = f.toUri().getPath(); - - poolSize = - Math.min( - MAX_PREFETCH_POOL_SIZE, - Math.max(MIN_PREFETCH_POOL_SIZE, - conf.getInt("fs.nfs.numprefetchthreads", DEFAULT_PREFETCH_POOL_SIZE))); doPrefetch = conf.getBoolean("fs.nfs.prefetch", DEFAULT_PREFETCH_ENABLED); - this.fileOffset = 0L; - this.readBlockSizeBits = readBlockSizeBits; + this.readBlockSizeBits = store.getReadSizeBits(); this.splitSize = splitSize; this.closed = new AtomicBoolean(false); - this.ongoing = new ConcurrentHashMap>(poolSize); - this.cache = new ConcurrentHashMap(DEFAULT_CACHE_SIZE_IN_BLOCKS); - this.statistics = - new StreamStatistics(NFSBufferedInputStream.class + pathString, streamId.getAndIncrement(), - true); - this.executors = new ThreadPoolExecutor(32, poolSize, 1, TimeUnit.SECONDS, - new LinkedBlockingDeque(1024), new ThreadPoolExecutor.CallerRunsPolicy()); + this.ongoing = new ConcurrentHashMap<>(DEFAULT_PREFETCH_POOL_SIZE); + this.cache = new ConcurrentHashMap<>(DEFAULT_CACHE_SIZE_IN_BLOCKS); + this.statistics + = new StreamStatistics(NFSBufferedInputStream.class + pathString, streamId.getAndIncrement(), + true); + this.executors = new ThreadPoolExecutor(DEFAULT_PREFETCH_POOL_SIZE, MAX_PREFETCH_POOL_SIZE, 5, TimeUnit.SECONDS, + new LinkedBlockingDeque(1024), new 
ThreadPoolExecutor.CallerRunsPolicy()); // Keep track of the file length at file open // NOTE: The file does not get modified while this stream is open - Nfs3FileAttributes attributes = store.getFileAttributes(handle, store.getCredentials()); + Nfs3FileAttributes attributes = store.getFileAttributes(handle, credentials); if (attributes != null) { this.fileLength = attributes.getSize(); + this.prefetchBlockLimit = (long) (Math.min(fileLength, splitSize) >> readBlockSizeBits); if (this.fileLength < 0) { throw new IOException("File length is invalid: " + this.fileLength); } @@ -122,7 +119,7 @@ public NFSBufferedInputStream(Configuration configuration, NFSv3FileSystemStore @Override public synchronized void seek(long pos) throws IOException { if (pos > fileLength) { - throw new IOException("Cannot seek after EOF: pos=" + pos + ", fileLength=" + fileLength); + throw new EOFException("Cannot seek after EOF: pos=" + pos + ", fileLength=" + fileLength); } fileOffset = pos; prefetchBlockLimit = (long) (Math.min(fileLength, pos + this.splitSize) >> readBlockSizeBits); @@ -176,7 +173,7 @@ private synchronized int _read(byte data[], int offset, int length) throws IOExc int loOffset = (int) (fileOffset - (loBlockId << readBlockSizeBits)); int hiOffset = (int) ((fileOffset + lengthToRead - 1) - (hiBlockId << readBlockSizeBits)); - if(closed.get()) { + if (closed.get()) { LOG.warn("Reading from an already closed InputStream. Check your code"); } @@ -240,14 +237,18 @@ private StreamBlock getBlock(long blockId) throws IOException { // Issue prefetch for upcoming blocks if (doPrefetch) { - for (long bid = blockId + 1; bid < blockId + DEFAULT_READAHEAD_SIZE; ++bid) { + if (blockId >= prefetchBlockLimit) { + prefetchBlockLimit += (long) (Math.min(fileLength, this.splitSize) >> readBlockSizeBits); + LOG.info("Changing prefetchBlockLimit to " + prefetchBlockLimit); + } + for (long bid = blockId + 1; bid < blockId + DEFAULT_READAHEAD_SIZE && bid < prefetchBlockLimit; ++bid) { if (!ongoing.containsKey(bid) && !cache.containsKey(bid)) { StreamBlock block = new StreamBlock(readBlockSizeBits); block.setBlockId(bid); block.setReady(false); cache.put(bid, block); - Read task = new Read(store, handle, statistics, bid, block); + Read task = new Read(store, handle, credentials, statistics, bid, block); Future future = executors.submit(task); ongoing.put(bid, future); } @@ -257,14 +258,14 @@ private StreamBlock getBlock(long blockId) throws IOException { // Block is being fetched, so wait for it if (ongoing.containsKey(blockId)) { Future future = ongoing.get(blockId); - while(true) { + while (true) { try { LOG.debug("Waiting for read task to complete ongoing reading block id=" + blockId); future.get(); break; - } catch(InterruptedException interrupted) { + } catch (InterruptedException interrupted) { continue; - } catch(Exception error) { + } catch (Exception error) { throw new IOException("Read resulted in an error", error); } } @@ -273,13 +274,13 @@ private StreamBlock getBlock(long blockId) throws IOException { // Some prefetches are done, check for them for (Iterator>> iter = ongoing.entrySet().iterator(); iter.hasNext();) { Future future = iter.next().getValue(); - if(future.isDone()) { + if (future.isDone()) { try { future.get(); iter.remove(); - } catch(InterruptedException interrupted) { + } catch (InterruptedException interrupted) { // Ignore - } catch(Exception error) { + } catch (Exception error) { throw new IOException("Prefetched resulted in error", error); } } @@ -308,19 +309,18 @@ private StreamBlock 
getBlock(long blockId) throws IOException { block.setReady(false); cache.put(blockId, block); - Read task = new Read(store, handle, statistics, blockId, block); + Read task = new Read(store, handle, credentials, statistics, blockId, block); Future future = executors.submit(task); - while(true) { + while (true) { try { future.get(); break; - } catch(InterruptedException interrupted) { + } catch (InterruptedException interrupted) { continue; - } catch(Exception error) { + } catch (Exception error) { throw new IOException("Read resulted in an error", error); } } - LOG.info("Looping inside while loop"); } } @@ -329,22 +329,22 @@ private StreamBlock getBlock(long blockId) throws IOException { public void close() throws IOException { boolean first = true; - if(closed.get()) { + if (closed.get()) { first = false; LOG.warn("Closing an already closed InputStream. Check your code"); } closed.set(true); // Shutdown the thread pool - if(first) { + if (first) { executors.shutdown(); try { - executors.awaitTermination(60, TimeUnit.SECONDS); + executors.awaitTermination(1, TimeUnit.SECONDS); } catch (InterruptedException exception) { // Ignore } } - + LOG.info(statistics); super.close(); } diff --git a/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedOutputStream.java b/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedOutputStream.java index 9156425..e43908c 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedOutputStream.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/stream/NFSBufferedOutputStream.java @@ -36,26 +36,26 @@ import org.apache.hadoop.fs.nfs.StreamStatistics; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; +import org.apache.hadoop.oncrpc.security.Credentials; public class NFSBufferedOutputStream extends OutputStream { final FileHandle handle; + final Credentials credentials; final Path path; final String pathString; final StreamStatistics statistics; final NFSv3FileSystemStore store; final AtomicBoolean closed; final int blockSizeBits; - final int poolSize; final ExecutorService executors; final List> ongoing; long fileOffset; StreamBlock currentBlock; - private static final int MIN_WRITEBACK_POOL_SIZE = 1; - private static final int MAX_WRITEBACK_POOL_SIZE = 512; - private static final int DEFAULT_WRITEBACK_POOL_SIZE = 128; + private static final int MAX_WRITEBACK_POOL_SIZE = 256; + private static final int DEFAULT_WRITEBACK_POOL_SIZE = 4; static final AtomicInteger streamId; @@ -66,23 +66,17 @@ public class NFSBufferedOutputStream extends OutputStream { } public NFSBufferedOutputStream(Configuration configuration, FileHandle handle, Path path, - NFSv3FileSystemStore store, int blockSizeBits, boolean append) throws IOException { + NFSv3FileSystemStore store, Credentials credentials, boolean append) throws IOException { this.handle = handle; + this.credentials = credentials; this.path = path; this.pathString = path.toUri().getPath(); - - poolSize = - Math.min( - MAX_WRITEBACK_POOL_SIZE, - Math.max(MIN_WRITEBACK_POOL_SIZE, - configuration.getInt("fs.nfs.numwritebackthreads", DEFAULT_WRITEBACK_POOL_SIZE))); - this.statistics = new StreamStatistics(NFSBufferedInputStream.class + pathString, streamId.getAndIncrement(), false); this.store = store; - this.blockSizeBits = blockSizeBits; + this.blockSizeBits = store.getWriteSizeBits(); this.currentBlock = null; this.closed = new AtomicBoolean(false); @@ -90,13 +84,13 @@ public NFSBufferedOutputStream(Configuration configuration, FileHandle handle, P // Create 
the task queues executors = - new ThreadPoolExecutor(32, poolSize, 1, TimeUnit.SECONDS, + new ThreadPoolExecutor(DEFAULT_WRITEBACK_POOL_SIZE, MAX_WRITEBACK_POOL_SIZE, 5, TimeUnit.SECONDS, new LinkedBlockingDeque(1024), new ThreadPoolExecutor.CallerRunsPolicy()); - ongoing = new LinkedList>(); + ongoing = new LinkedList<>(); // Set file offset to 0 or file length if (append) { - Nfs3FileAttributes attributes = store.getFileAttributes(handle, store.getCredentials()); + Nfs3FileAttributes attributes = store.getFileAttributes(handle, credentials); if (attributes != null) { fileOffset = attributes.getSize(); LOG.info("Appending to file so starting at offset = " + fileOffset); @@ -201,7 +195,7 @@ public synchronized void flush() throws IOException { } // Commit all outstanding changes - Commit commit = new Commit(this, store, handle, 0L, 0); + Commit commit = new Commit(this, store, handle, credentials, 0L, 0); Future future = executors.submit(commit); while (true) { try { @@ -221,15 +215,16 @@ public synchronized void flush() throws IOException { public synchronized void close() throws IOException { boolean first = true; + long start = System.currentTimeMillis(); if (closed.get() == true) { first = false; LOG.warn("Closing an already closed output stream"); } closed.set(true); - + // Shutdown the thread pool - if(first) { + if (first) { flush(); executors.shutdown(); try { @@ -241,6 +236,7 @@ public synchronized void close() throws IOException { LOG.info(statistics); super.close(); + LOG.info("OutputStream shutdown took " + (System.currentTimeMillis() - start) + " ms"); } private StreamBlock getBlock(long blockId) throws IOException { @@ -264,7 +260,7 @@ private void checkOngoing() throws IOException { try { f.get(); iter.remove(); - } catch (InterruptedException interruped) { + } catch (InterruptedException interrupted) { // Ignore } catch (ExecutionException execution) { throw new IOException("Write back call failed", execution); @@ -279,8 +275,7 @@ private void flushBlock(StreamBlock block) throws IOException { checkOngoing(); // Submit new task - Write call = - new Write(store, handle, statistics, block.getBlockId(), currentBlock); + Write call = new Write(store, handle, credentials, statistics, block.getBlockId(), currentBlock); Future future = executors.submit(call); ongoing.add(future); } diff --git a/src/main/java/org/apache/hadoop/fs/nfs/stream/Read.java b/src/main/java/org/apache/hadoop/fs/nfs/stream/Read.java index 927c8c4..a595606 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/stream/Read.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/stream/Read.java @@ -22,6 +22,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.response.READ3Response; +import org.apache.hadoop.oncrpc.security.Credentials; public class Read implements Callable { @@ -30,11 +31,13 @@ public class Read implements Callable { final StreamStatistics statistics; final Long blockId; final StreamBlock block; + final Credentials credentials; - public Read(NFSv3FileSystemStore store, FileHandle handle, StreamStatistics statistics, + public Read(NFSv3FileSystemStore store, FileHandle handle, Credentials credentials, StreamStatistics statistics, Long blockId, StreamBlock block) { this.store = store; this.handle = handle; + this.credentials = credentials; this.statistics = statistics; this.blockId = blockId; this.block = block; @@ -46,7 +49,7 @@ public Read call() throws Exception { long readOffset = (blockId << block.getBlockSizeBits()); 
READ3Response read3Response = - store.read(handle, readOffset, block.getBlockSize(), store.getCredentials()); + store.read(handle, readOffset, block.getBlockSize(), credentials); int status = read3Response.getStatus(); if (status != Nfs3Status.NFS3_OK) { throw new RpcException("NFS_READ error: status=" + status); diff --git a/src/main/java/org/apache/hadoop/fs/nfs/stream/Write.java b/src/main/java/org/apache/hadoop/fs/nfs/stream/Write.java index 832527e..1f841d4 100644 --- a/src/main/java/org/apache/hadoop/fs/nfs/stream/Write.java +++ b/src/main/java/org/apache/hadoop/fs/nfs/stream/Write.java @@ -23,18 +23,21 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; +import org.apache.hadoop.oncrpc.security.Credentials; public class Write implements Callable { final NFSv3FileSystemStore store; final FileHandle handle; + final Credentials credentials; final StreamStatistics statistics; final Long blockId; final StreamBlock block; - public Write(NFSv3FileSystemStore store, FileHandle handle, StreamStatistics statistics, Long blockId, StreamBlock block) { + public Write(NFSv3FileSystemStore store, FileHandle handle, Credentials credentials, StreamStatistics statistics, Long blockId, StreamBlock block) { this.store = store; this.handle = handle; + this.credentials = credentials; this.statistics = statistics; this.blockId = blockId; this.block = block; @@ -48,7 +51,7 @@ public Write call() throws Exception { byte buffer[] = new byte[block.getDataLength()]; System.arraycopy(block.array(), block.getDataStartOffset(), buffer, 0, block.getDataLength()); - WRITE3Response response = store.write(handle, writeOffset, block.getDataLength(), WriteStableHow.UNSTABLE, buffer, store.getCredentials()); + WRITE3Response response = store.write(handle, writeOffset, block.getDataLength(), WriteStableHow.UNSTABLE, buffer, credentials); int status = response.getStatus(); if (status != Nfs3Status.NFS3_OK) { throw new IOException("NFS write error: status=" + status); diff --git a/src/main/java/org/apache/hadoop/fs/nfs/topology/Endpoint.java b/src/main/java/org/apache/hadoop/fs/nfs/topology/Endpoint.java new file mode 100644 index 0000000..be6daac --- /dev/null +++ b/src/main/java/org/apache/hadoop/fs/nfs/topology/Endpoint.java @@ -0,0 +1,83 @@ +/** + * Copyright 2014 NetApp Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. 
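A pattern repeated throughout this patch is threading an explicit Credentials object through Read, Write, and Commit instead of each task pulling store.getCredentials() at call time. A short sketch of building the tasks under the new constructors, using CredentialsNone (AUTH_NONE) as one concrete choice; store, stream, handle, statistics, blockId, and block are assumed to exist in the surrounding scope:

    // Sketch only: credentials are now supplied per stream, not fetched per call.
    Credentials creds = new CredentialsNone();
    Read readTask = new Read(store, handle, creds, statistics, blockId, block);
    Write writeTask = new Write(store, handle, creds, statistics, blockId, block);
    Commit commitTask = new Commit(stream, store, handle, creds, 0L, 0);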
+ */ +package org.apache.hadoop.fs.nfs.topology; + +import com.google.gson.annotations.Expose; +import java.net.URI; +import java.util.Objects; + + +public class Endpoint { + + @Expose + final URI host; + @Expose + final String exportPath; + @Expose + final String path; + + public Endpoint(URI host, String export, String path) { + this.host = host; + this.exportPath = export; + this.path = path; + } + + public URI getUri() { + return host; + } + + public String getExportPath() { + return exportPath; + } + + public String getPath() { + return path; + } + + @Override + public String toString() { + return "Endpoint: host=" + host + " export=" + exportPath + " path=" + path; + } + + @Override + public int hashCode() { + int hash = 3; + hash = 97 * hash + Objects.hashCode(this.host); + hash = 97 * hash + Objects.hashCode(this.exportPath); + hash = 97 * hash + Objects.hashCode(this.path); + return hash; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final Endpoint other = (Endpoint) obj; + if (!Objects.equals(this.host, other.host)) { + return false; + } + if (!Objects.equals(this.exportPath, other.exportPath)) { + return false; + } + if (!Objects.equals(this.path, other.path)) { + return false; + } + return true; + } + +} diff --git a/src/main/java/org/apache/hadoop/fs/nfs/topology/Mapping.java b/src/main/java/org/apache/hadoop/fs/nfs/topology/Mapping.java new file mode 100644 index 0000000..e53ffd7 --- /dev/null +++ b/src/main/java/org/apache/hadoop/fs/nfs/topology/Mapping.java @@ -0,0 +1,92 @@ +/** + * Copyright 2014 NetApp Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.nfs.topology; + +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonSyntaxException; +import com.google.gson.annotations.Expose; +import java.io.BufferedReader; +import java.io.FileReader; +import java.io.IOException; +import java.net.URI; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + + +public class Mapping { + + @Expose + List spaces; + + final static AtomicInteger counter = new AtomicInteger(1); + + public Mapping() { + spaces = new LinkedList<>(); + } + + public Mapping(List spaces) { + this.spaces = spaces; + } + + public Namespace getNamespace(URI uri) { + // Find one from the config file + if(spaces != null && spaces.size() > 0) { + for(Namespace ns : spaces) { + if(ns.getUri().getAuthority().equals(uri.getAuthority())) { + return ns; + } + } + } + // Make a new namespace with defaults + Namespace space = buildNamespace(uri); + if(spaces == null) { + spaces = new LinkedList<>(); + } + spaces.add(space); + return space; + } + + public Namespace buildNamespace(URI uri) { + Namespace space = new Namespace("ns-" + counter.getAndIncrement(), uri); + List endpoints = new LinkedList<>(); + endpoints.add(space.getDefaultEndpoint()); + space.setEndpoints(endpoints); + return space; + } + + public static Mapping loadFromString(String json) throws IOException { + try { + Gson gson = new GsonBuilder().excludeFieldsWithoutExposeAnnotation().create(); + Mapping map = gson.fromJson(json, Mapping.class); + return map; + } catch(JsonSyntaxException exception) { + throw new IOException("Could not parse config string", exception); + } + } + + public static Mapping loadFromFile(String filename) throws IOException { + try { + Gson gson = new GsonBuilder().excludeFieldsWithoutExposeAnnotation().create(); + Mapping map = gson.fromJson(new BufferedReader(new FileReader(filename)), Mapping.class); + return map; + } catch(JsonSyntaxException exception) { + throw new IOException("Could not parse config file " + filename, exception); + } catch(IOException exception) { + throw new IOException("Could not open namespace config file " + filename, exception); + } + } + +} diff --git a/src/main/java/org/apache/hadoop/fs/nfs/topology/Namespace.java b/src/main/java/org/apache/hadoop/fs/nfs/topology/Namespace.java new file mode 100644 index 0000000..af8f2d6 --- /dev/null +++ b/src/main/java/org/apache/hadoop/fs/nfs/topology/Namespace.java @@ -0,0 +1,96 @@ +/** + * Copyright 2014 NetApp Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. 
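Mapping is deserialized with Gson using only @Expose fields, so the namespace configuration mirrors the field names directly: a top-level spaces array of namespaces, each carrying a name, uri, and endpoints whose entries have host, exportPath, and path. An illustrative config (the host names are made up, and URI fields rely on Gson's built-in java.net.URI handling):

    // Hypothetical namespace config, parsed via Mapping.loadFromString()
    String json =
          "{ \"spaces\": [ { \"name\": \"ns1\", \"uri\": \"nfs://filer1:2049/\","
        + "    \"endpoints\": ["
        + "      { \"host\": \"nfs://filer1\", \"exportPath\": \"/vol/volA\", \"path\": \"/data01\" },"
        + "      { \"host\": \"nfs://filer2\", \"exportPath\": \"/vol/volB\", \"path\": \"/data02\" } ] } ] }";
    Mapping mapping = Mapping.loadFromString(json);
    Namespace ns = mapping.getNamespace(new URI("nfs://filer1:2049/"));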
+ */ +package org.apache.hadoop.fs.nfs.topology; + +import com.google.gson.annotations.Expose; +import java.net.URI; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; + + +public class Namespace { + + @Expose + final String name; + @Expose + final URI uri; + @Expose + final NamespaceOptions options; + @Expose + final List endpoints; + + public Namespace(String name, URI uri) { + this(name, uri, null); + } + + public Namespace(String name, URI uri, NamespaceOptions options) { + this.name = name; + this.uri = uri; + this.endpoints = new LinkedList<>(); + this.options = (options == null) ? new NamespaceOptions() : options; + } + + public void setEndpoints(List points) { + endpoints.clear(); + endpoints.addAll(points); + } + + public String getName() { + return name; + } + + public URI getUri() { + return uri; + } + + public NamespaceOptions getConfiguration() { + return options; + } + + public List getEndpoints() { + return endpoints; + } + + public Endpoint getDefaultEndpoint() { + return new Endpoint(uri, options.getNfsExportPath(), "/"); + } + + @Override + public int hashCode() { + int hash = 5; + hash = 29 * hash + Objects.hashCode(this.name); + hash = 29 * hash + Objects.hashCode(this.uri); + return hash; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final Namespace other = (Namespace) obj; + if (!Objects.equals(this.name, other.name)) { + return false; + } + if (!Objects.equals(this.uri, other.uri)) { + return false; + } + return true; + } + +} diff --git a/src/main/java/org/apache/hadoop/fs/nfs/topology/NamespaceOptions.java b/src/main/java/org/apache/hadoop/fs/nfs/topology/NamespaceOptions.java new file mode 100644 index 0000000..2908a5a --- /dev/null +++ b/src/main/java/org/apache/hadoop/fs/nfs/topology/NamespaceOptions.java @@ -0,0 +1,177 @@ +/** + * Copyright 2014 NetApp Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.nfs.topology; + +import com.google.gson.annotations.Expose; +import org.apache.hadoop.fs.nfs.NFSv3FileSystem; + + +public class NamespaceOptions { + + @Expose + String nfsExportPath; + @Expose + int nfsReadSizeBits; + @Expose + int nfsWriteSizeBits; + @Expose + int nfsSplitSizeBits; + @Expose + String nfsAuthScheme; + @Expose + String nfsUsername; + @Expose + String nfsGroupname; + @Expose + int nfsUid; + @Expose + int nfsGid; + @Expose + int nfsPort; + @Expose + int nfsMountPort; + @Expose + int nfsRpcbindPort; + @Expose + String nfsAuthFile; + + public static final int INVALID_PORT = -1; + + static final NamespaceOptions DEFAULT; + + static { + DEFAULT = new NamespaceOptions(); + } + + public NamespaceOptions() { + setNfsReadSizeBits(NFSv3FileSystem.DEFAULT_READ_BLOCK_SIZE_BITS); + setNfsWriteSizeBits(NFSv3FileSystem.DEFAULT_WRITE_BLOCK_SIZE_BITS); + setNfsSplitSizeBits(NFSv3FileSystem.DEFAULT_NFS_SPLIT_SIZE_BITS); + setNfsUsername(NFSv3FileSystem.NFS_USER_NAME_DEFAULT); + setNfsGroupname(NFSv3FileSystem.NFS_GROUP_NAME_DEFAULT); + setNfsUid(NFSv3FileSystem.NFS_UID_DEFAULT); + setNfsGid(NFSv3FileSystem.NFS_GID_DEFAULT); + setNfsExportPath("/"); + setNfsPort(NFSv3FileSystem.DEFAULT_NFS_PORT); + setNfsMountPort(INVALID_PORT); + setNfsRpcbindPort(111); + setNfsAuthScheme("AUTH_NONE"); + } + + public static NamespaceOptions getDefaultOptions() { + return DEFAULT; + } + + public String getNfsExportPath() { + return nfsExportPath; + } + + public int getNfsReadSizeBits() { + return nfsReadSizeBits; + } + + public int getNfsWriteSizeBits() { + return nfsWriteSizeBits; + } + + public int getNfsSplitSizeBits() { + return nfsSplitSizeBits; + } + + public String getNfsAuthScheme() { + return nfsAuthScheme; + } + + public String getNfsUsername() { + return nfsUsername; + } + + public String getNfsGroupname() { + return nfsGroupname; + } + + public int getNfsUid() { + return nfsUid; + } + + public int getNfsGid() { + return nfsGid; + } + + public int getNfsPort() { + return nfsPort; + } + + public int getNfsMountPort() { + return nfsMountPort; + } + + public int getNfsRpcbindPort() { + return nfsRpcbindPort; + } + + public String getNfsAuthFile() { + return nfsAuthFile; + } + + public void setNfsExportPath(String nfsExportPath) { + this.nfsExportPath = nfsExportPath; + } + + public void setNfsAuthScheme(String authScheme) { + this.nfsAuthScheme = authScheme; + } + + public void setNfsReadSizeBits(int nfsReadSizeBits) { + this.nfsReadSizeBits = nfsReadSizeBits; + } + + public void setNfsWriteSizeBits(int nfsWriteSizeBits) { + this.nfsWriteSizeBits = nfsWriteSizeBits; + } + + public void setNfsSplitSizeBits(int nfsSplitSizeBits) { + this.nfsSplitSizeBits = nfsSplitSizeBits; + } + + public void setNfsUsername(String username) { + this.nfsUsername = username; + } + + public void setNfsGroupname(String groupname) { + this.nfsGroupname = groupname; + } + + public void setNfsUid(int uid) { + this.nfsUid = uid; + } + + public void setNfsGid(int gid) { + this.nfsGid = gid; + } + + public void setNfsPort(int port) { + this.nfsPort = port; + } + + public void setNfsMountPort(int port) { + this.nfsMountPort = port; + } + + public void setNfsRpcbindPort(int port) { + this.nfsRpcbindPort = port; + } + + +} diff --git a/src/main/java/org/apache/hadoop/fs/nfs/topology/SimpleTopologyRouter.java b/src/main/java/org/apache/hadoop/fs/nfs/topology/SimpleTopologyRouter.java new file mode 100644 index 0000000..99c16b1 --- /dev/null +++ 
b/src/main/java/org/apache/hadoop/fs/nfs/topology/SimpleTopologyRouter.java @@ -0,0 +1,129 @@ +/** + * Copyright 2014 NetApp Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.nfs.topology; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.nfs.NFSv3FileSystem; +import org.apache.hadoop.fs.nfs.NFSv3FileSystemStore; + + +public class SimpleTopologyRouter extends TopologyRouter { + + NFSv3FileSystem fs; + Namespace space; + Map stores; + public final static Log LOG = LogFactory.getLog(SimpleTopologyRouter.class); + + public SimpleTopologyRouter() { + stores = new HashMap<>(); + } + + @Override + public synchronized void initialize(NFSv3FileSystem fs, Namespace space, Configuration configuration) throws IOException { + this.fs = fs; + this.space = space; + this.configuration.addResource(configuration); + if(!verify(space)) { + throw new IOException("Check namespace to verify configuration"); + } + } + + private boolean verify(Namespace ns) { + + // Go through the endpoints - they should not be ambiguous + // Two endpoints cannot cover the same path (unless they are identical) + // Degenerate configurations are still possible and this check will not catch them + for(Endpoint epi : ns.getEndpoints()) { + for(Endpoint epj : ns.getEndpoints()) { + String pi = (epi.getPath() == null) ? "/" : epi.getPath(); + String pj = (epj.getPath() == null) ? "/" : epj.getPath(); + if(pi.startsWith(pj) && !pi.equals(pj)) { + LOG.warn("Two endpoints " + epi + " and " + epj + " overlap, which can lead to ambiguous routing. Please fix the namespace configuration."); + return false; + } + } + } + + return true; + } + + @Override + public synchronized NFSv3FileSystemStore getStore(Path p) throws IOException { + + if(space == null) { + throw new IOException("No namespace defined!"); + } + if(p == null) { + throw new IOException("Path is null!"); + } + + // Choose an endpoint using the path + Endpoint ep = chooseEndpoint(space, p); + if(!stores.containsKey(ep)) { + NFSv3FileSystemStore store = new NFSv3FileSystemStore(fs, space, ep); + store.initialize(); + stores.put(ep, store); + } + return stores.get(ep); + } + + @Override + public synchronized Endpoint chooseEndpoint(Namespace space, Path p) throws IOException { + List endpoints = space.getEndpoints(); + List chosen = new LinkedList<>(); + + if(p == null || !p.isAbsolute()) { + throw new IOException("Need absolute path for choosing endpoint"); + } + + // Multiple endpoints are specified + if(endpoints != null && endpoints.size() > 0) { + for(Endpoint ep : endpoints) { + String epp = ep.getPath(); + if(epp == null || epp.length() == 0 || Path.getPathWithoutSchemeAndAuthority(p).toString().startsWith(epp)) { + chosen.add(ep); + } + } + + // No endpoint matches, so fall back to the default one + if(chosen.isEmpty()) { + return space.getDefaultEndpoint(); + } + + // Pick one of the matching endpoints + String hostname = InetAddress.getLocalHost().getHostName(); + int id = Math.abs((hostname == null ? 0 : hostname.hashCode())) % chosen.size(); + LOG.debug("Choosing option " + id + " from " + chosen.size() + " options available"); + return chosen.get(id); + } + else { + return null; + } + } + + @Override + public synchronized List getAllStores() throws IOException { + return new LinkedList<>(stores.values()); + } + +} diff --git a/src/main/java/org/apache/hadoop/fs/nfs/topology/TopologyRouter.java b/src/main/java/org/apache/hadoop/fs/nfs/topology/TopologyRouter.java new file mode 100644 index 0000000..7d9b70f --- /dev/null +++ b/src/main/java/org/apache/hadoop/fs/nfs/topology/TopologyRouter.java @@ -0,0 +1,40 @@ +/** + * Copyright 2014 NetApp Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.nfs.topology; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.nfs.NFSv3FileSystem; +import org.apache.hadoop.fs.nfs.NFSv3FileSystemStore; + + +public abstract class TopologyRouter { + + final Configuration configuration; + + public TopologyRouter() { + this.configuration = new Configuration(); + } + + public abstract void initialize(NFSv3FileSystem fs, Namespace space, Configuration configuration) throws IOException; + + public abstract Endpoint chooseEndpoint(Namespace space, Path p) throws IOException; + + public abstract NFSv3FileSystemStore getStore(Path p) throws IOException; + + public abstract List getAllStores() throws IOException; + +} diff --git a/src/test/java/org/apache/hadoop/fs/NFSTestOptions.java b/src/test/java/org/apache/hadoop/fs/NFSTestOptions.java new file mode 100644 index 0000000..26a9384 --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/NFSTestOptions.java @@ -0,0 +1,23 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + + +public class NFSTestOptions { + + public static final String NFS_TEST_OPTIONS_FILE = "nfs-test-options.xml"; + +} diff --git a/src/test/java/org/apache/hadoop/fs/TestNFSFileSystemContract.java b/src/test/java/org/apache/hadoop/fs/TestNFSFileSystemContract.java new file mode 100644 index 0000000..b797b04 --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/TestNFSFileSystemContract.java @@ -0,0 +1,41 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs; + +import java.net.URI; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.nfs.NFSv3FileSystem; + + +public class TestNFSFileSystemContract extends FileSystemContractBaseTest { + + @Override + protected void setUp() throws Exception { + Configuration conf = new Configuration(); + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + if(conf.get("fs.defaultFS") != null) { + fs = new NFSv3FileSystem(); + fs.initialize(new URI(conf.get("fs.defaultFS")), conf); + } else { + throw new RuntimeException("Configuration option fs.defaultFS is not defined"); + } + } + + @Override + public void testMkdirsWithUmask() throws Exception { + // Skipped: the umask permissions failure is not understood yet + } +} diff --git a/src/test/java/org/apache/hadoop/fs/TestNFSMainOperations.java b/src/test/java/org/apache/hadoop/fs/TestNFSMainOperations.java new file mode 100644 index 0000000..7a35696 --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/TestNFSMainOperations.java @@ -0,0 +1,51 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.net.URI; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.nfs.NFSv3FileSystem; + +public class TestNFSMainOperations extends FSMainOperationsBaseTest { + + public TestNFSMainOperations() { + super("/"); + } + + @Override + protected FileSystem createFileSystem() throws Exception { + Configuration conf = new Configuration(); + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + if (conf.get("fs.defaultFS") != null) { + FileSystem fs = new NFSv3FileSystem(); + fs.initialize(new URI(conf.get("fs.defaultFS")), conf); + return fs; + } else { + throw new RuntimeException("Configuration option fs.defaultFS is not defined"); + } + } + + @Override + public void testCopyToLocalWithUseRawLocalFileSystemOption() throws Exception { + // Skipped: the NFS root path overlaps with the root path of RawLocalFileSystem + } + + @Override + public void testListStatusThrowsExceptionForUnreadableDir() { + // Skipped: permissions are not handled well yet + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/NFSContract.java b/src/test/java/org/apache/hadoop/fs/contract/NFSContract.java new file mode 100644 index 0000000..7eb2a55 --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/NFSContract.java @@ -0,0 +1,36 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
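These tests read their connection settings from nfs-test-options.xml on the classpath and fail fast when fs.defaultFS is absent. An illustrative resource file in standard Hadoop configuration XML; the server URI is a placeholder to be pointed at a real NFSv3 export:

    <configuration>
      <!-- Placeholder: replace with the NFSv3 server under test -->
      <property>
        <name>fs.defaultFS</name>
        <value>nfs://somehost:2049/</value>
      </property>
    </configuration>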
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.nfs.NFSv3FileSystem; + + +public class NFSContract extends AbstractBondedFSContract { + + public static final String CONTRACT_XML = "contract/nfs-contract.xml"; + + public NFSContract(Configuration conf) { + super(conf); + addConfResource(CONTRACT_XML); + } + + @Override + public String getScheme() { + return NFSv3FileSystem.NFS_URI_SCHEME; + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractCreate.java b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractCreate.java new file mode 100644 index 0000000..18113ac --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractCreate.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.NFSTestOptions; + + +public class TestNFSContractCreate extends AbstractContractCreateTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + if(conf == null) { + conf = new Configuration(); + } + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + return new NFSContract(conf); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractDelete.java b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractDelete.java new file mode 100644 index 0000000..8d7495c --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractDelete.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.NFSTestOptions; + + +public class TestNFSContractDelete extends AbstractContractDeleteTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + if(conf == null) { + conf = new Configuration(); + } + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + return new NFSContract(conf); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractMkdir.java b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractMkdir.java new file mode 100644 index 0000000..f76e1dd --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractMkdir.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.NFSTestOptions; + + +public class TestNFSContractMkdir extends AbstractContractMkdirTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + if(conf == null) { + conf = new Configuration(); + } + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + return new NFSContract(conf); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractOpen.java b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractOpen.java new file mode 100644 index 0000000..e7f6cfb --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractOpen.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.NFSTestOptions; + + +public class TestNFSContractOpen extends AbstractContractOpenTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + if(conf == null) { + conf = new Configuration(); + } + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + return new NFSContract(conf); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractRename.java b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractRename.java new file mode 100644 index 0000000..3225602 --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractRename.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Apache Software Foundation. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.NFSTestOptions; + + +public class TestNFSContractRename extends AbstractContractRenameTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + if(conf == null) { + conf = new Configuration(); + } + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + return new NFSContract(conf); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractRootDir.java b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractRootDir.java new file mode 100644 index 0000000..eff1d80 --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractRootDir.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.NFSTestOptions; + + +public class TestNFSContractRootDir extends AbstractContractRootDirectoryTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + if(conf == null) { + conf = new Configuration(); + } + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + return new NFSContract(conf); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractSeek.java b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractSeek.java new file mode 100644 index 0000000..4a0843b --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/contract/TestNFSContractSeek.java @@ -0,0 +1,33 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.NFSTestOptions; + + +public class TestNFSContractSeek extends AbstractContractSeekTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + if(conf == null) { + conf = new Configuration(); + } + conf.addResource(NFSTestOptions.NFS_TEST_OPTIONS_FILE); + return new NFSContract(conf); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/nfs/portmap/PortMapTest.java b/src/test/java/org/apache/hadoop/fs/nfs/portmap/PortMapTest.java deleted file mode 100644 index 42f56d1..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfs/portmap/PortMapTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.nfs.portmap; - -import static org.junit.Assert.*; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.portmap.PortmapMapping; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class PortMapTest { - - PortmapClient client; - - @Before - public void setUp() throws Exception { - client = new PortmapClient("localhost", 111); - } - - @After - public void tearDown() throws Exception { - client.shutdown(); - } - - @Test - public void testNull() { - try { - client.nullOp(); - } catch(IOException exception) { - fail("Portmap NULL operation failed"); - } - } - - @Test - public void testGetPort() { - try { - int port = client.getport(PortmapClient.PROGRAM, PortmapClient.VERSION, PortmapMapping.TRANSPORT_TCP); - assertEquals(111, port); - } catch(IOException exception) { - fail("Portmap GETPORT operation failed with exception=" + exception.getMessage()); - } - } - - @Test - public void testGetMapping() { - try { - - List programs = client.dump(); - assertNotNull(programs); - - // Should contains PORTMAP itself for sure - boolean found = false; - for(PortmapMapping prog : programs) { - if(prog.toString().equals("(PortmapMapping-" + (PortmapClient.PROGRAM) + ":" + (PortmapClient.VERSION) + ":" + (PortmapMapping.TRANSPORT_TCP) + ":" + (111) + ")")) { - found = true; - } - } - assertTrue(found); - } catch(IOException exception) { - fail("Portmap DUMP operation failed with exception=" + exception.getMessage()); - } - } - -} diff --git a/src/test/java/org/apache/hadoop/fs/nfs/topology/TestTopologyRouter.java b/src/test/java/org/apache/hadoop/fs/nfs/topology/TestTopologyRouter.java new file mode 100644 index 0000000..9bb17ad --- /dev/null +++ b/src/test/java/org/apache/hadoop/fs/nfs/topology/TestTopologyRouter.java @@ -0,0 +1,78 @@ +/* + * Copyright 2015 Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.nfs.topology; + +import java.net.URI; +import java.util.LinkedList; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.nfs.NFSv3FileSystem; +import static org.junit.Assert.assertEquals; +import org.junit.Test; + + +public class TestTopologyRouter { + + private Mapping getMappingWithSingleNamespace() throws Exception { + List spaces = new LinkedList<>(); + Namespace space = new Namespace("default", new URI("nfs://somehost:2049/")); + List points = new LinkedList<>(); + points.add(new Endpoint(new URI("nfs://host1"), "/vol/volA", "/data01")); + points.add(new Endpoint(new URI("nfs://host2"), "/vol/volB", "/data02")); + space.setEndpoints(points); + spaces.add(space); + return new Mapping(spaces); + } + + private Mapping getMappingWithSeveralMounts() throws Exception { + List spaces = new LinkedList<>(); + Namespace space = new Namespace("default", new URI("nfs://somehost:2049/")); + List points = new LinkedList<>(); + points.add(new Endpoint(new URI("nfs://host1"), "/vol/volA", "/data01")); + points.add(new Endpoint(new URI("nfs://host2"), "/vol/volB", "/data02")); + space.setEndpoints(points); + spaces.add(space); + return new Mapping(spaces); + } + + @Test + public void testDefaultNamespace() throws Exception { + Mapping mapping = new Mapping(); + Namespace ns = mapping.getNamespace(new URI("nfs://localhost:2049/foo/bar")); + // buildNamespace() registers the default endpoint, so one endpoint is expected + assertEquals(1, ns.getEndpoints().size()); + } + + @Test + public void testSimpleMatch() throws Exception { + Mapping mapping = getMappingWithSingleNamespace(); + TopologyRouter router = new SimpleTopologyRouter(); + router.initialize(new NFSv3FileSystem(), mapping.getNamespace(new URI("nfs://somehost:2049/")), new Configuration()); + Endpoint chosen = router.chooseEndpoint(mapping.getNamespace(new URI("nfs://somehost:2049/")), new Path("/data01/X")); + assertEquals(new URI("nfs://somehost:2049/"), mapping.getNamespace(new URI("nfs://somehost:2049/")).getUri()); + assertEquals(new URI("nfs://host1"), chosen.getUri()); + } + + @Test + public void testSimpleMismatch() throws Exception { + Mapping mapping = getMappingWithSingleNamespace(); + TopologyRouter router = new SimpleTopologyRouter(); + router.initialize(new NFSv3FileSystem(), mapping.getNamespace(new URI("nfs://somehost:2049/")), new Configuration()); + Namespace ns = mapping.getNamespace(new URI("nfs://blahblah:2049/")); + assertEquals(new URI("nfs://blahblah:2049/"), ns.getUri()); + } + +} diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/BrokenServerTest.java b/src/test/java/org/apache/hadoop/fs/nfsv3/BrokenServerTest.java deleted file mode 100644 index 03d71b6..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfsv3/BrokenServerTest.java +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.nfsv3; - -import java.io.IOException; -import java.net.BindException; -import java.net.URI; -import java.util.EnumSet; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.nfs.NFSv3FileSystemStore; -import org.apache.hadoop.fs.nfsv3.server.FileObject; -import org.apache.hadoop.fs.nfsv3.server.MockNfs3Server; -import org.apache.hadoop.nfs.NfsTime; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; -import org.apache.hadoop.nfs.nfs3.Nfs3FileHandle; -import org.apache.hadoop.nfs.nfs3.Nfs3SetAttr; -import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField; -import org.apache.hadoop.oncrpc.security.CredentialsNone; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class BrokenServerTest { - - static final int NFS_SERVER_PORT = 8211; - static final String MOUNT_PATH = "/somepath"; - static NFSv3FileSystemStore store; - static MockNfs3Server nfsServer; - static Thread serverThread; - static int nfsServerPort; - - public final static Logger LOG = LoggerFactory.getLogger(BrokenServerTest.class); - - @BeforeClass - public static void setUp() throws Exception { - - // Start the Mock NFS server - nfsServerPort = NFS_SERVER_PORT; - while(nfsServerPort < 50000) { - try { - nfsServer = new MockNfs3Server(true, nfsServerPort); - serverThread = new Thread(nfsServer); - serverThread.start(); - LOG.info("Started mock NFS3 server ..."); - break; - } catch(BindException exception) { - nfsServerPort++; - continue; - } - } - - // Connect to NFS - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", true); - conf.set("mambo.test.mountpath", MOUNT_PATH); - store = new NFSv3FileSystemStore(new URI("nfs://localhost:" + nfsServerPort + "/"), conf, 2); - store.initialize(new URI("nfs://localhost:" + nfsServerPort + "/"), conf); - } - - @SuppressWarnings("deprecation") - @AfterClass - public static void tearDown() throws Exception { - serverThread.stop(); - - } - - @Test(expected = IOException.class) - public void CREATE() throws Exception { - store.create(new Nfs3FileHandle(0), "create_guarded_file1", Nfs3Constant.CREATE_GUARDED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void COMMIT() throws Exception { - store.commit(new Nfs3FileHandle(0L), 0L, 4096, new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void FSINFO() throws Exception { - store.fsinfo(new Nfs3FileHandle(0L), new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void FSINFO2() throws Exception { - store.getFilesystemInfo(new Nfs3FileHandle(0L), new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void GETATTR() throws Exception { - store.getattr(new Nfs3FileHandle(0L), new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void GETATTR2() throws Exception { - store.getFileAttributes(new Nfs3FileHandle(0L), new CredentialsNone()); - } - - 
@Test(expected = IOException.class) - public void LOOKUP() throws Exception { - store.lookup(new Nfs3FileHandle(0L), "lookup1", new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void LOOKUP2() throws Exception { - store.getFileHandle(new Nfs3FileHandle(0L), "lookup1", new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void MKDIR() throws Exception { - store.mkdir(new Nfs3FileHandle(0L), "dir2", new Nfs3SetAttr(), new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void NULL() throws IOException { - store.nullOp(); - } - - @Test(expected = IOException.class) - public void READ() throws Exception { - store.read(new Nfs3FileHandle(0L), (long) FileObject.MAX_FILE_SIZE, 1, new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void READDIR() throws Exception { - store.readdir(new Nfs3FileHandle(0L), 0L, 0L, 65536, new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void READDIR2() throws Exception { - store.getDirectoryList(new Nfs3FileHandle(0L), 0L, 0L, 65536, new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void RENAME() throws Exception { - store.rename(new Nfs3FileHandle(0L), "rename1", new Nfs3FileHandle(0L), "rename2", new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void REMOVE() throws Exception { - store.remove(new Nfs3FileHandle(0L), "remove1", new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void RMDIR() throws Exception { - store.rmdir(new Nfs3FileHandle(0L), "remove1", new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void SETATTR() throws Exception { - Nfs3SetAttr attr = new Nfs3SetAttr(); - attr.setUid(100); - attr.setGid(100); - EnumSet updates = EnumSet.of(SetAttrField.UID, SetAttrField.GID); - attr.setUpdateFields(updates); - store.setattr(new Nfs3FileHandle(0L), attr, false, new NfsTime(System.currentTimeMillis()), new CredentialsNone()); - } - - @Test(expected = IOException.class) - public void WRITE() throws Exception { - byte[] largeData = new byte[100]; - store.write(new Nfs3FileHandle(0L), (long) FileObject.MAX_FILE_SIZE, largeData.length, WriteStableHow.FILE_SYNC, largeData, new CredentialsNone()); - } -} diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/NFSFileSystemTest.java b/src/test/java/org/apache/hadoop/fs/nfsv3/NFSFileSystemTest.java deleted file mode 100644 index aebdfb8..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfsv3/NFSFileSystemTest.java +++ /dev/null @@ -1,353 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.nfsv3; - -import static org.junit.Assert.*; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.BlockLocation; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.nfs.NFSv3FileSystem; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.junit.After; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.util.Arrays; - - -public class NFSFileSystemTest { - - static final String SERVER_HOSTNAME_DEFAULT = "nfs://atg-f-flamingo-d1.gdl.englab.netapp.com:2049/mambodata"; - static final String MOUNT_PATH_DEFAULT = "/mambodata"; - - public final static Logger LOG = LoggerFactory.getLogger(NFSFileSystemTest.class); - - //Test paths - public final static Path[] test_paths_dirs = {new Path("/unit_test/test0/subtest0"), new Path("/unit_test/test1/subtest1"), new Path("/unit_test/test2/subtest2") }; - public final static Path test_file_dir = new Path("/unit_test_files"); - public final static Path test_file_path_1 = new Path("/unit_test_files/testfile1"); - public final static Path test_file_path_2 = new Path("/unit_test_files/testfile_renamed"); - - private NFSv3FileSystem fs; - static String ServerHostname; - static String MountPath; - @BeforeClass - public static void setUpBeforeClass() throws Exception { - ServerHostname = System.getProperty("nfs_server"); - if (ServerHostname == null) { - ServerHostname = SERVER_HOSTNAME_DEFAULT; - } - MountPath = System.getProperty("path"); - if (MountPath == null) { - MountPath = MOUNT_PATH_DEFAULT; - } - } - @Before - public void setUp() throws Exception { - fs = new NFSv3FileSystem(); - - URI uri = new URI(ServerHostname); - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", false); - conf.set("fs.nfs.mountdir", MountPath); - - fs.initialize(uri, conf); - - for (Path dir : test_paths_dirs) { - fs.delete(dir, true); - } - fs.delete(test_file_dir, true); - fs.delete(test_file_path_1, true); - fs.delete(test_file_path_2, true); - } - - - @After - public void tearDown() throws Exception { - - for (Path dir : test_paths_dirs) { - fs.delete(dir, true); - } - fs.delete(test_file_dir, true); - fs.delete(test_file_path_1, true); - fs.delete(test_file_path_2, true); - - fs.close(); - } - - @Test - public void testNFSInit() throws Exception{ - Boolean ex_caught = false; - - @SuppressWarnings("resource") - NFSv3FileSystem temp_fs = new NFSv3FileSystem(); - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", false); - conf.set("fs.nfs.mountdir", MountPath); - - //wrong scheme will be taken as nfs - URI uri = new URI("http://atg-f-flamingo-d1.gdl.englab.netapp.com:2049/mambodata"); - try{ - temp_fs.initialize(uri, conf); - } catch (Exception e) { - ex_caught = true; - } - assertTrue(ex_caught); - ex_caught = false; - - //null authority will not be taken - temp_fs = new NFSv3FileSystem(); - uri = new URI(""); - try{ - temp_fs.initialize(uri, conf); - } catch (Exception e) { - ex_caught = true; - } - - assertTrue(ex_caught); - ex_caught = false; - - //null authority will not be taken - temp_fs = new NFSv3FileSystem(); - uri = new URI("blahblah"); - try{ - temp_fs.initialize(uri, conf); - } catch (Exception e) { - 
ex_caught = true; - } - - assertTrue(ex_caught); - ex_caught = false; - - } - - @Test - public void testFileSystemInfo() { - assertNotNull(fs); - Path p = fs.getWorkingDirectory(); - assertTrue(p.isRoot()); - - assertEquals(fs.getNfsBlockSize(), 1048576L); //1M block size - assertEquals(fs.getScheme(), NFSv3FileSystem.NFS_URI_SCHEME); - } - - @Test - public void testBlockLocation() throws Exception{ - Boolean ex_caught = false; - - BlockLocation[] locs = fs.getFileBlockLocations(new Path("/"), 0, 1024); - assertEquals(locs.length, 1); - - try{ - Path temp = null; - locs = fs.getFileBlockLocations(temp, 0, 1024); - } catch (Exception e) { - ex_caught = true; - } - - assertTrue(ex_caught); - } - - @SuppressWarnings("deprecation") - @Test - public void testDirs() throws Exception { - // making /test0/subtest0, /test1/subtest1 /test2/subtest2" - FsPermission permission = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE); - assertTrue(fs.mkdirs(test_paths_dirs[0], permission)); - - assertTrue(fs.mkdirs(test_paths_dirs[1], permission)); - //The second time just returns true - assertTrue(fs.mkdirs(test_paths_dirs[1], permission)); - - assertTrue(fs.mkdirs(test_paths_dirs[2], permission)); - - FileStatus[] file_status = fs.listStatus(new Path("/unit_test")); - - for (int i = 0; i<3; i++) { - assertTrue(file_status[i].isDir()); - assertEquals(file_status[i].getPath(), new Path(fs.getWorkingDirectory(), test_paths_dirs[i].getParent())); - - } - //set working directory to /test0/subtest0 - Path newWorkingDir = new Path(fs.getWorkingDirectory(), test_paths_dirs[0]); - fs.setWorkingDirectory(test_paths_dirs[0]); - assertEquals(fs.getWorkingDirectory(), newWorkingDir); - Path subsub = new Path("subsubtest0"); - assertTrue(fs.mkdirs(subsub, permission)); - file_status = fs.listStatus(test_paths_dirs[0]); - - assertEquals(file_status[0].getPath(), new Path(fs.getWorkingDirectory(), new Path("/unit_test/test0/subtest0/subsubtest0"))); - - //Remove dir - assertFalse(fs.delete(test_paths_dirs[0].getParent(), false)); - assertTrue(fs.delete(test_paths_dirs[0].getParent(), true)); - - assertTrue(fs.delete(test_paths_dirs[1].getParent(), true)); - - assertTrue(fs.delete(test_paths_dirs[2].getParent(), true)); - - FileStatus[] file_status_after = fs.listStatus(new Path("/unit_test")); - assertEquals(file_status_after.length, 0); - - } - - @SuppressWarnings("deprecation") - @Test - public void testFiles() throws Exception { - //create a file "/unit_test_files/testfile1" - FsPermission permission = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE); - assertTrue(fs.mkdirs(test_file_dir, permission)); - - String test_content_1 = new String("0123456789"); - - - FSDataOutputStream file_o_stream = fs.create(test_file_path_1); - assertNotNull(file_o_stream); - FileStatus[] file_status = fs.listStatus(new Path("/unit_test_files")); - assertEquals(file_status.length, 1); - assertFalse(file_status[0].isDir()); - assertEquals(file_status[0].getPath(), new Path(fs.getWorkingDirectory(), test_file_path_1)); - - - file_o_stream.writeBytes(test_content_1); - file_o_stream.flush(); - - - byte[] test1_buffer = new byte[test_content_1.length()]; - FSDataInputStream file_i_stream = fs.open(test_file_path_1); - file_i_stream.read(test1_buffer, 0, test1_buffer.length); - String buffer_str_1 = new String(test1_buffer); - assertNotNull(buffer_str_1); - assertNotNull(test_content_1); - assertTrue(test_content_1.equals(buffer_str_1)); - - file_i_stream.seek(0); - for(int i=0; i files 
= new HashSet(); - for(int i = 0; i < 100; ++i) { - CREATE3Response nfsCREATEResponse = store.create(rootHandle, "test" + i, Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse.getStatus()); - files.add("test" + i); - } - - // List directory contents - READDIR3Response nfsREADDIRResponse = store.readdir(rootHandle, 0L, 0L, 65536, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsREADDIRResponse.getStatus()); - assertEquals(5, nfsREADDIRResponse.getDirList().getEntries().size()); - - // List directory contents - Nfs3DirList dirList = store.getDirectoryList(rootHandle, 0L, 0L, 65536, new CredentialsNone()); - assertEquals(5, dirList.getEntries().size()); - - for(Nfs3DirEntry entry : dirList.getEntries()) { - assertEquals(true, files.contains(entry.getName())); - } - - // Bad handle - READDIR3Response nfsREADDIRResponse2 = store.readdir(new Nfs3FileHandle(0L), 0L, 0L, 65536, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_BADHANDLE, nfsREADDIRResponse2.getStatus()); - Nfs3DirList dirList2 = store.getDirectoryList(new Nfs3FileHandle(0L), 0L, 0L, 65536, new CredentialsNone()); - assertNull(dirList2); - - } - -} diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/Nfs3Test.java b/src/test/java/org/apache/hadoop/fs/nfsv3/Nfs3Test.java deleted file mode 100644 index 41e7ad4..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfsv3/Nfs3Test.java +++ /dev/null @@ -1,736 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.nfsv3; - -import static org.junit.Assert.*; - -import java.io.IOException; -import java.net.BindException; -import java.net.URI; -import java.util.EnumSet; -import java.util.Random; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.nfs.NFSv3FileSystemStore; -import org.apache.hadoop.fs.nfs.mount.MountClient; -import org.apache.hadoop.fs.nfs.mount.MountMNTResponse; -import org.apache.hadoop.fs.nfs.rpc.RpcClient; -import org.apache.hadoop.fs.nfsv3.server.FileObject; -import org.apache.hadoop.fs.nfsv3.server.MockNfs3Filesystem; -import org.apache.hadoop.fs.nfsv3.server.MockNfs3Server; -import org.apache.hadoop.mount.MountInterface; -import org.apache.hadoop.mount.MountResponse; -import org.apache.hadoop.nfs.NfsFileType; -import org.apache.hadoop.nfs.NfsTime; -import org.apache.hadoop.nfs.nfs3.FileHandle; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; -import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; -import org.apache.hadoop.nfs.nfs3.Nfs3FileHandle; -import org.apache.hadoop.nfs.nfs3.Nfs3Info; -import org.apache.hadoop.nfs.nfs3.Nfs3SetAttr; -import org.apache.hadoop.nfs.nfs3.Nfs3Status; -import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField; -import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; -import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; -import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response; -import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response; -import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response; -import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response; -import org.apache.hadoop.nfs.nfs3.response.READ3Response; -import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; -import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; -import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; -import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; -import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; -import org.apache.hadoop.oncrpc.RpcAcceptedReply; -import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; -import org.apache.hadoop.oncrpc.RpcMessage; -import org.apache.hadoop.oncrpc.RpcReply.ReplyState; -import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.oncrpc.security.CredentialsNone; -import org.apache.hadoop.oncrpc.security.CredentialsSys; -import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class Nfs3Test { - - static final int NFS_SERVER_PORT = 8211; - static final String MOUNT_PATH = "/somepath"; - static NFSv3FileSystemStore store; - static MockNfs3Server nfsServer; - static Thread serverThread; - static int nfsServerPort; - - public final static Logger LOG = LoggerFactory.getLogger(Nfs3Test.class); - - @BeforeClass - public static void setUp() throws Exception { - - // Start the Mock NFS server - nfsServerPort = NFS_SERVER_PORT; - while(nfsServerPort < 50000) { - try { - nfsServer = new MockNfs3Server(false, nfsServerPort); - serverThread = new Thread(nfsServer); - serverThread.start(); - LOG.info("Started mock NFS3 server ..."); - break; - } catch(BindException exception) { - nfsServerPort++; - continue; - } - } - - // Connect to NFS - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", true); - conf.set("mambo.test.mountpath", MOUNT_PATH); - store = new NFSv3FileSystemStore(new 
URI("nfs://localhost:" + nfsServerPort + "/"), conf, 2); - store.initialize(new URI("nfs://localhost:" + nfsServerPort + "/"), conf); - } - - @SuppressWarnings("deprecation") - @AfterClass - public static void tearDown() throws Exception { - serverThread.stop(); - - } - - @Test - public void NullConfiguration() throws Exception { - boolean errored = true; - - // Null configuration - { - NFSv3FileSystemStore temp = new NFSv3FileSystemStore(new URI("nfs://localhost:" + nfsServerPort + "/"), null, 1); - try { - temp.initialize(new URI("nfs://localhost:" + nfsServerPort + "/"), null); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } finally { - temp.shutdown(); - } - } - - { - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", true); - conf.set("mambo.test.mountpath", MOUNT_PATH); - NFSv3FileSystemStore temp = new NFSv3FileSystemStore(new URI("nfs://localhost:" + nfsServerPort + "/"), conf, 1); - temp.initialize(new URI("nfs://localhost:" + nfsServerPort + "/"), conf); - temp.shutdown(); - } - - { - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", true); - conf.set("mambo.test.mountpath", MOUNT_PATH); - conf.set("fs.nfs.auth.flavor", "BLAH"); - errored = false; - NFSv3FileSystemStore temp = null; - try { - temp = new NFSv3FileSystemStore(new URI("nfs://localhost:" + nfsServerPort + "/"), conf, 1); - temp.initialize(new URI("nfs://localhost:" + nfsServerPort + "/"), conf); - temp.shutdown(); - } catch(IOException exception) { - errored = true; - } finally { - assertTrue(errored); - } - } - - { - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", true); - conf.set("mambo.test.mountpath", MOUNT_PATH); - conf.set("fs.nfs.auth.flavor", "AUTH_NONE"); - NFSv3FileSystemStore temp = new NFSv3FileSystemStore(new URI("nfs://localhost:" + nfsServerPort + "/"), conf, 1); - assertEquals(AuthFlavor.AUTH_NONE, temp.getCredentials().getFlavor()); - temp.shutdown(); - } - - - { - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", true); - conf.set("mambo.test.mountpath", MOUNT_PATH); - conf.set("fs.nfs.auth.flavor", "AUTH_SYS"); - NFSv3FileSystemStore temp = new NFSv3FileSystemStore(new URI("nfs://localhost:" + nfsServerPort + "/"), conf, 1); - assertEquals(AuthFlavor.AUTH_SYS, temp.getCredentials().getFlavor()); - temp.shutdown(); - } - - { - Configuration conf = new Configuration(); - conf.setBoolean("mambo.test", true); - conf.set("mambo.test.mountpath", MOUNT_PATH); - conf.set("fs.nfs.auth.flavor", "AUTH_UNIX"); - NFSv3FileSystemStore temp = new NFSv3FileSystemStore(new URI("nfs://localhost:" + nfsServerPort + "/"), conf, 1); - assertEquals(AuthFlavor.AUTH_SYS, temp.getCredentials().getFlavor()); - temp.shutdown(); - } - - } - - @Test - public void Mount() throws Exception { - RpcClient client = new RpcClient("localhost", nfsServerPort); - - // Send the RPC request - XDR in = new XDR(); - in.writeString("/mountpath"); - XDR out = new XDR(); - RpcMessage reply = client.service(MountClient.MOUNTD_PROGRAM, MountClient.MOUNTD_VERSION, MountInterface.MNTPROC.MNT.getValue(), in, out, new CredentialsNone()); - assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType()); - - RpcAcceptedReply accepted = (RpcAcceptedReply) reply; - assertEquals(ReplyState.MSG_ACCEPTED, accepted.getState()); - - client.shutdown(); - - } - - @SuppressWarnings("unused") - @Test - public void MountClient() throws Exception { - - { - MountClient client = new MountClient("localhost", nfsServerPort, null); - 
MountMNTResponse response = client.mnt("/mountpath"); - assertNotNull(response); - assertEquals(MountResponse.MNT_OK, response.getStatus()); - assertNotNull(response.getAuthFlavors()); - assertEquals(AuthFlavor.AUTH_NONE.getValue(), response.getAuthFlavors()[0]); - - MountMNTResponse response2 = client.mnt("/blah"); - assertNotNull(response2); - assertEquals(MountResponse.MNT_OK, response2.getStatus()); - assertNull(response2.getAuthFlavors()); - - client.shutdown(); - } - - { - Configuration conf = new Configuration(); - conf.set("fs.nfs.mount.auth.flavor", "AUTH_NONE"); - MountClient client = new MountClient("localhost", nfsServerPort, conf); - MountMNTResponse response = client.mnt("/mountpath"); - assertNotNull(response); - assertEquals(MountResponse.MNT_OK, response.getStatus()); - assertNotNull(response.getAuthFlavors()); - assertEquals(AuthFlavor.AUTH_NONE.getValue(), response.getAuthFlavors()[0]); - client.shutdown(); - } - - { - Configuration conf = new Configuration(); - conf.set("fs.nfs.mount.auth.flavor", "BLAH"); - MountClient client = new MountClient("localhost", nfsServerPort, conf); - MountMNTResponse response = client.mnt("/mountpath"); - assertNotNull(response); - assertEquals(MountResponse.MNT_OK, response.getStatus()); - assertNotNull(response.getAuthFlavors()); - assertEquals(AuthFlavor.AUTH_NONE.getValue(), response.getAuthFlavors()[0]); - client.shutdown(); - } - - { - Configuration conf = new Configuration(); - conf.set("fs.nfs.mount.auth.flavor", "AUTH_SYS"); - MountClient client2 = new MountClient("localhost", nfsServerPort, conf); - boolean errored = true; - try { - MountMNTResponse response3 = client2.mnt("/mountpath"); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - } - } - - @Test - public void ProgramMismatch() throws Exception { - RpcClient client = new RpcClient("localhost", nfsServerPort); - - // Should be a reply message - RpcMessage reply = client.service(1, 1, 1, new XDR(), new XDR(), new CredentialsNone()); - assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType()); - - // Should be mismatch - RpcAcceptedReply accepted = (RpcAcceptedReply) reply; - assertEquals(ReplyState.MSG_ACCEPTED, accepted.getState()); - assertEquals(AcceptState.PROG_MISMATCH, accepted.getAcceptState()); - - client.shutdown(); - - } - - @SuppressWarnings("unused") - @Test - public void StrongCredentialsNotAccepted() throws Exception { - FileHandle rootHandle = store.getRootfh(); - boolean errored = true; - try { - GETATTR3Response nfsGETATTRResponse = store.getattr(rootHandle, new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - } - - @Test - public void CREATE() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Create new file, should not exist already - CREATE3Response nfsCREATEResponse1 = store.create(rootHandle, "create_guarded_file1", Nfs3Constant.CREATE_GUARDED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse1.getStatus()); - - // Create new file, check that it says that file already exists - CREATE3Response nfsCREATEResponse2 = store.create(rootHandle, "create_guarded_file1", Nfs3Constant.CREATE_GUARDED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_EXIST, nfsCREATEResponse2.getStatus()); - - // Create new file, it's ok if file already exists - CREATE3Response nfsCREATEResponse3 = store.create(rootHandle, "create_guarded_file1", Nfs3Constant.CREATE_UNCHECKED, new 
Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse3.getStatus()); - - // Create new file - CREATE3Response nfsCREATEResponse4 = store.create(rootHandle, "create_guarded_file2", Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse4.getStatus()); - - // Create but no handle is sent back - CREATE3Response nfsCREATEResponse5 = store.create(rootHandle, "sendnohandle", Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse5.getStatus()); - - // Invalid credentials - boolean errored = true; - try { - store.create(rootHandle, "create_guarded_file3", Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void COMMIT() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Create a temporary file - CREATE3Response nfsCREATEResponse = store.create(rootHandle, "commit_test", Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse.getStatus()); - - FileHandle fileHandle = nfsCREATEResponse.getObjHandle(); - byte[] largeData = new byte[65536]; - Random random = new Random(); - random.nextBytes(largeData); - - // Write some data and commit - for(int i = 0; i < FileObject.MAX_FILE_SIZE; i += largeData.length) { - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) i, largeData.length, WriteStableHow.FILE_SYNC, largeData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsWRITEResponse.getStatus()); - COMMIT3Response nfsCOMMITResponse = store.commit(fileHandle, (long) i, largeData.length, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCOMMITResponse.getStatus()); - } - - // Commit using a bad handle - COMMIT3Response nfsCOMMITResponse = store.commit(new Nfs3FileHandle(0L), 0L, 4096, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_BADHANDLE, nfsCOMMITResponse.getStatus()); - - // Invalid credentials - boolean errored = true; - try { - store.commit(fileHandle, (long) 0, largeData.length, new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void FSINFO() throws Exception { - FileHandle handle = store.getRootfh(); - - //Good response - FSINFO3Response response = store.fsinfo(handle, store.getCredentials()); - assertEquals(Nfs3Status.NFS3_OK, response.getStatus()); - - // Bad response due to bad handle - FSINFO3Response response2 = store.fsinfo(new FileHandle(0), store.getCredentials()); - assertEquals(Nfs3Status.NFS3ERR_BADHANDLE, response2.getStatus()); - - //Good response - Nfs3Info info = store.getFilesystemInfo(handle, store.getCredentials()); - assertNotNull(info); - - // Bad response due to bad handle - Nfs3Info info2 = store.getFilesystemInfo(new FileHandle(0), store.getCredentials()); - assertNull(info2); - - } - - - @Test - public void GETATTR() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Create directory - MKDIR3Response nfsMKDIRResponse = store.mkdir(rootHandle, "getattr1", new Nfs3SetAttr(), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsMKDIRResponse.getStatus()); - - // Get the attributes of directory - GETATTR3Response nfsGETATTRResponse = store.getattr(nfsMKDIRResponse.getObjFileHandle(), new CredentialsNone()); - 
assertEquals(Nfs3Status.NFS3_OK, nfsGETATTRResponse.getStatus()); - - // Get the attributes of directory - Nfs3FileAttributes nfsGETATTRResponse2 = store.getFileAttributes(nfsMKDIRResponse.getObjFileHandle(), new CredentialsNone()); - assertEquals(NfsFileType.NFSDIR.toValue(), nfsGETATTRResponse2.getType()); - assertEquals(MockNfs3Filesystem.MOCK_FSID, (int) nfsGETATTRResponse2.getFsid()); - - // Bad file handle - GETATTR3Response nfsGETATTRResponse3 = store.getattr(new Nfs3FileHandle(0), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_BADHANDLE, nfsGETATTRResponse3.getStatus()); - - // Bad file handle - boolean errored = false; - try { - store.getFileAttributes(new Nfs3FileHandle(0L), new CredentialsNone()); - } catch(IOException exception) { - errored = true; - } finally { - assertTrue(errored); - } - - // Invalid credentials - errored = true; - try { - store.getFileAttributes(nfsMKDIRResponse.getObjFileHandle(), new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void LOOKUP() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Before directory creation - LOOKUP3Response nfsLOOKUPResponse1 = store.lookup(rootHandle, "lookup1", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_NOENT, nfsLOOKUPResponse1.getStatus()); - - // Create directory - MKDIR3Response nfsMKDIRResponse = store.mkdir(rootHandle, "lookup1", new Nfs3SetAttr(), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsMKDIRResponse.getStatus()); - - // Check again - LOOKUP3Response nfsLOOKUPResponse2 = store.lookup(rootHandle, "lookup1", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsLOOKUPResponse2.getStatus()); - - // Invalid credentials - boolean errored = true; - try { - store.lookup(rootHandle, "lookup1", new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void MKDIR() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Create directory - MKDIR3Response nfsMKDIRResponse = store.mkdir(rootHandle, "dir2", new Nfs3SetAttr(), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsMKDIRResponse.getStatus()); - - // Create invalid directory - MKDIR3Response nfsMKDIRResponse2 = store.mkdir(new Nfs3FileHandle(0L), "dir2", new Nfs3SetAttr(), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_BADHANDLE, nfsMKDIRResponse2.getStatus()); - - } - - @Test - public void NULL() throws Exception { - store.nullOp(); - } - - @Test - public void READ() throws Exception { - FileHandle rootHandle = store.getRootfh(); - byte[] smallData = new byte[4096]; - byte[] mediumData = new byte[16384]; - byte[] largeData = new byte[65536]; - - CREATE3Response nfsCREATEResponse1 = store.create(rootHandle, "read_test_file1", Nfs3Constant.CREATE_GUARDED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse1.getStatus()); - - // Get file handle - FileHandle fileHandle = nfsCREATEResponse1.getObjHandle(); - - // Initialize the data - Random random = new Random(); - random.nextBytes(smallData); - random.nextBytes(mediumData); - random.nextBytes(largeData); - - // Write small data - for(int i = 0; i < FileObject.MAX_FILE_SIZE; i += smallData.length) { - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) i, smallData.length, WriteStableHow.FILE_SYNC, smallData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsWRITEResponse.getStatus()); - - 
READ3Response nfsREADResponse = store.read(fileHandle, (long) i, smallData.length, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsREADResponse.getStatus()); - byte[] readData = nfsREADResponse.getData().array(); - for(int j = 0; j < smallData.length; ++j) { - assertEquals(smallData[j], readData[j]); - } - } - - // Write medium sized data - for(int i = 0; i < FileObject.MAX_FILE_SIZE; i += mediumData.length) { - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) i, mediumData.length, WriteStableHow.FILE_SYNC, mediumData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsWRITEResponse.getStatus()); - - READ3Response nfsREADResponse = store.read(fileHandle, (long) i, mediumData.length, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsREADResponse.getStatus()); - byte[] readData = nfsREADResponse.getData().array(); - for(int j = 0; j < mediumData.length; ++j) { - assertEquals(mediumData[j], readData[j]); - } - } - - // Write large sized data - for(int i = 0; i < FileObject.MAX_FILE_SIZE; i += largeData.length) { - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) i, largeData.length, WriteStableHow.FILE_SYNC, largeData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsWRITEResponse.getStatus()); - - READ3Response nfsREADResponse = store.read(fileHandle, (long) i, largeData.length, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsREADResponse.getStatus()); - byte[] readData = nfsREADResponse.getData().array(); - for(int j = 0; j < largeData.length; ++j) { - assertEquals(largeData[j], readData[j]); - } - } - - // Read beyond the file - READ3Response nfsREADResponse = store.read(fileHandle, (long) FileObject.MAX_FILE_SIZE, 1, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_INVAL, nfsREADResponse.getStatus()); - - // Invalid credentials - boolean errored = true; - try { - store.read(fileHandle, (long) FileObject.MAX_FILE_SIZE, 1, new CredentialsNone()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void RENAME() throws Exception { - - FileHandle rootHandle = store.getRootfh(); - - // Create a test file - CREATE3Response nfsCREATEResponse1 = store.create(rootHandle, "rename1", Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse1.getStatus()); - - // Rename existing file - RENAME3Response nfsRENAMEResponse1 = store.rename(rootHandle, "rename1", rootHandle, "rename2", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsRENAMEResponse1.getStatus()); - - // Rename unknown file - RENAME3Response nfsRENAMEResponse2 = store.rename(rootHandle, "renameblahblah", rootHandle, "renamefoobar", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_NOENT, nfsRENAMEResponse2.getStatus()); - - // Invalid credentials - boolean errored = true; - try { - store.rename(rootHandle, "renameblahblah", rootHandle, "renamefoobar", new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void REMOVE() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Create a test file - CREATE3Response nfsCREATEResponse1 = store.create(rootHandle, "remove1", Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse1.getStatus()); - - // Remove existing file - REMOVE3Response nfsREMOVEResponse1 = store.remove(rootHandle, 
"remove1", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsREMOVEResponse1.getStatus()); - - LOOKUP3Response nfsLOOKUPResponse1 = store.lookup(rootHandle, "remove1", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_NOENT, nfsLOOKUPResponse1.getStatus()); - - // Remove unknown file - REMOVE3Response nfsREMOVEResponse2 = store.remove(rootHandle, "remove2", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_NOENT, nfsREMOVEResponse2.getStatus()); - - // Invalid credentials - boolean errored = true; - try { - store.remove(rootHandle, "remove2", new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void RMDIR() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Create directory - MKDIR3Response nfsMKDIRResponse = store.mkdir(rootHandle, "rmdir1", new Nfs3SetAttr(), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsMKDIRResponse.getStatus()); - - // Remove existing directory - RMDIR3Response nfsRMDIRResponse1 = store.rmdir(rootHandle, "rmdir1", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsRMDIRResponse1.getStatus()); - - LOOKUP3Response nfsLOOKUPResponse1 = store.lookup(rootHandle, "rmdir1", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_NOENT, nfsLOOKUPResponse1.getStatus()); - - // Remove unknown directory - RMDIR3Response nfsRMDIRResponse2 = store.rmdir(rootHandle, "rmdir2", new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_NOENT, nfsRMDIRResponse2.getStatus()); - - // Invalid credentials - boolean errored = true; - try { - store.rmdir(rootHandle, "rmdir3", new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - - } - - @Test - public void SETATTR() throws Exception { - FileHandle rootHandle = store.getRootfh(); - - // Create a test file - CREATE3Response nfsCREATEResponse1 = store.create(rootHandle, "setattr1", Nfs3Constant.CREATE_UNCHECKED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse1.getStatus()); - - // Set attributes - Nfs3SetAttr attr = new Nfs3SetAttr(); - attr.setUid(100); - attr.setGid(100); - EnumSet updates = EnumSet.of(SetAttrField.UID, SetAttrField.GID); - attr.setUpdateFields(updates); - SETATTR3Response nfsSETATTRResponse = store.setattr(nfsCREATEResponse1.getObjHandle(), attr, false, new NfsTime(System.currentTimeMillis()), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsSETATTRResponse.getStatus()); - assertEquals(100, nfsSETATTRResponse.getWccData().getPostOpAttr().getGid()); - assertEquals(100, nfsSETATTRResponse.getWccData().getPostOpAttr().getUid()); - - // Set attr with time check - Nfs3SetAttr attr2 = new Nfs3SetAttr(); - attr2.setUid(100); - attr2.setGid(100); - EnumSet updates2 = EnumSet.of(SetAttrField.UID, SetAttrField.GID); - attr2.setUpdateFields(updates2); - SETATTR3Response nfsSETATTRResponse2 = store.setattr(nfsCREATEResponse1.getObjHandle(), attr2, true, new NfsTime(System.currentTimeMillis()), new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsSETATTRResponse2.getStatus()); - assertEquals(100, nfsSETATTRResponse2.getWccData().getPostOpAttr().getGid()); - assertEquals(100, nfsSETATTRResponse2.getWccData().getPostOpAttr().getUid()); - - // Invalid credentials - boolean errored = true; - try { - store.setattr(nfsCREATEResponse1.getObjHandle(), attr, false, new NfsTime(System.currentTimeMillis()), new CredentialsSys()); - errored = false; - } catch(IOException 
exception) { - assertTrue(errored); - } - - } - - @Test - public void WRITE() throws Exception { - FileHandle rootHandle = store.getRootfh(); - byte[] smallData = new byte[4096]; - byte[] mediumData = new byte[16384]; - byte[] largeData = new byte[65536]; - - CREATE3Response nfsCREATEResponse1 = store.create(rootHandle, "write_test_file1", Nfs3Constant.CREATE_GUARDED, new Nfs3SetAttr(), 0L, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsCREATEResponse1.getStatus()); - - // Get file handle - FileHandle fileHandle = nfsCREATEResponse1.getObjHandle(); - - // Initialize the data - Random random = new Random(); - random.nextBytes(smallData); - random.nextBytes(mediumData); - random.nextBytes(largeData); - - // Write small data - for(int i = 0; i < FileObject.MAX_FILE_SIZE; i += smallData.length) { - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) i, smallData.length, WriteStableHow.FILE_SYNC, smallData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsWRITEResponse.getStatus()); - - Nfs3FileAttributes nfsGETATTRResponse = store.getFileAttributes(fileHandle, new CredentialsNone()); - assertEquals((long) (i + smallData.length), nfsGETATTRResponse.getSize()); - - } - - for(int i = 0; i < FileObject.MAX_FILE_SIZE; i += mediumData.length) { - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) i, mediumData.length, WriteStableHow.FILE_SYNC, mediumData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsWRITEResponse.getStatus()); - } - - for(int i = 0; i < FileObject.MAX_FILE_SIZE; i += largeData.length) { - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) i, largeData.length, WriteStableHow.FILE_SYNC, largeData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3_OK, nfsWRITEResponse.getStatus()); - } - - // Write beyond the file - WRITE3Response nfsWRITEResponse = store.write(fileHandle, (long) FileObject.MAX_FILE_SIZE, largeData.length, WriteStableHow.FILE_SYNC, largeData, new CredentialsNone()); - assertEquals(Nfs3Status.NFS3ERR_FBIG, nfsWRITEResponse.getStatus()); - - boolean errored = true; - try { - store.write(fileHandle, (long) FileObject.MAX_FILE_SIZE, largeData.length, WriteStableHow.FILE_SYNC, largeData, new CredentialsSys()); - errored = false; - } catch(IOException exception) { - assertTrue(errored); - } - } - - -} diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/server/DirectoryObject.java b/src/test/java/org/apache/hadoop/fs/nfsv3/server/DirectoryObject.java deleted file mode 100644 index 27fff09..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfsv3/server/DirectoryObject.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.hadoop.fs.nfsv3.server;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.hadoop.nfs.NfsFileType;
-import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
-import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
-
-public class DirectoryObject extends FsObject {
-
-    final Set<FsObject> contents;
-
-    protected DirectoryObject(String name) {
-        super(FileType.TYPE_DIRECTORY, name);
-        contents = new HashSet<FsObject>();
-    }
-
-    protected DirectoryObject(String name, long id) {
-        super(FileType.TYPE_DIRECTORY, name, id);
-        contents = new HashSet<FsObject>();
-    }
-
-    public Set<FsObject> listDirectoryContents() {
-        return contents;
-    }
-
-    @Override
-    public void setAttr(SetAttr3 attr) {
-        NfsFileType nfsType = (this.type.equals(FileType.TYPE_DIRECTORY)) ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
-        int nlink = 0;
-        short mode = (short) attr.getMode();
-        int uid = attr.getUid();
-        int gid = attr.getGid();
-        int size = 4096;
-        int fsid = MockNfs3Filesystem.MOCK_FSID;
-        int fileId = (int) this.getId();
-        int mtime = (int) System.currentTimeMillis();
-        if(attr.getMtime() != null) {
-            mtime = attr.getMtime().getSeconds();
-        }
-        int atime = (int) System.currentTimeMillis();
-        if(attr.getAtime() != null) {
-            atime = attr.getAtime().getSeconds();
-        }
-        this.attr = new Nfs3FileAttributes(nfsType, nlink, mode, uid, gid, size, fsid, fileId, mtime, atime);
-    }
-
-    public FsObject getItemInDirectory(String name) {
-        for(FsObject obj : contents) {
-            if(obj.getFilename().equals(name)) {
-                return obj;
-            }
-        }
-        return null;
-    }
-
-    public void addNewFile(FileObject file) throws IOException {
-        if(contents.contains(file)) {
-            throw new IOException("Directory " + this + " already contains file " + file);
-        } else {
-            contents.add(file);
-        }
-    }
-
-    public void addNewDirectory(DirectoryObject dir) throws IOException {
-        if(contents.contains(dir)) {
-            throw new IOException("Directory " + this + " already contains directory " + dir);
-        } else {
-            contents.add(dir);
-        }
-    }
-
-    public void removeFileFromDirectory(FileObject file) throws IOException {
-        assert(contents.contains(file));
-        contents.remove(file);
-    }
-
-    public void removeDirectoryFromDirectory(DirectoryObject dir) throws IOException {
-        assert(contents.contains(dir));
-        assert(dir.listDirectoryContents().size() == 0);
-        contents.remove(dir);
-    }
-
-}
diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/server/FileObject.java b/src/test/java/org/apache/hadoop/fs/nfsv3/server/FileObject.java
deleted file mode 100644
index 3c533da..0000000
--- a/src/test/java/org/apache/hadoop/fs/nfsv3/server/FileObject.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Copyright 2014 NetApp Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.hadoop.fs.nfsv3.server; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.apache.hadoop.nfs.NfsFileType; -import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; -import org.apache.hadoop.nfs.nfs3.request.SetAttr3; - - -public class FileObject extends FsObject { - - byte[] data; - int size; - - protected FileObject(String name) { - super(FileType.TYPE_FILE, name); - size = 0; - data = new byte[MAX_FILE_SIZE]; - } - - public int getSize() { - return size; - } - - @Override - public void setAttr(SetAttr3 attr) { - NfsFileType nfsType = (this.type.equals(FileType.TYPE_DIRECTORY)) ? NfsFileType.NFSDIR : NfsFileType.NFSREG; - int nlink = 0; - short mode = (short) attr.getMode(); - int uid = attr.getUid(); - int gid = attr.getGid(); - int fsid = MockNfs3Filesystem.MOCK_FSID; - int fileId = (int) this.getId(); - int mtime = (attr.getMtime() == null) ? ( (int) System.currentTimeMillis() / 1000) : attr.getMtime().getSeconds(); - int atime = (attr.getAtime() == null) ? ( (int) System.currentTimeMillis() / 1000) : attr.getAtime().getSeconds(); - super.attr = new Nfs3FileAttributes(nfsType, nlink, mode, uid, gid, this.size, fsid, fileId, mtime, atime); - } - - public void write(long offset, int length, ByteBuffer writeData) throws IOException { - - int newSize = (int) (offset + length); - if( (offset + length) > FsObject.MAX_FILE_SIZE) { - throw new IOException("File is too big!"); - } - - // Byte buffer is allocated to maximum file size - // so just copy the contents into the right place - System.arraycopy(writeData.array(), 0, data, (int) offset, length); - size = (size > newSize) ? size : newSize; - - // Update attributes - this.attr.setSize(size); - - return; - } - - public int read(long offset, int length, byte[] readData) throws IOException { - - if( (offset + length) > size) { - throw new IOException("Can't read beyond file length"); - } - assert(length > 0 && length < FsObject.MAX_FILE_SIZE); - - if( ((int) offset + length) > size) { - length = size - (int) offset; - } - System.arraycopy(data, (int) offset, readData, 0, length); - - return length; - - } - -} diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/server/FsObject.java b/src/test/java/org/apache/hadoop/fs/nfsv3/server/FsObject.java deleted file mode 100644 index 11eacd7..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfsv3/server/FsObject.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.hadoop.fs.nfsv3.server;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.nfs.NfsTime;
-import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
-import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
-import org.apache.hadoop.nfs.nfs3.response.WccAttr;
-
-public abstract class FsObject implements Comparable<FsObject> {
-
-    String name;
-    long id;
-    byte[] data;
-    Nfs3FileAttributes attr;
-    FileType type;
-
-    public static final int MAX_FILE_SIZE = 2*1024*1024;
-    public static final AtomicLong fileIdCounter;
-
-    static {
-        fileIdCounter = new AtomicLong(1);
-    }
-
-    public enum FileType {TYPE_DIRECTORY, TYPE_FILE};
-
-    protected FsObject(FileType type, String name, long id) {
-        this.type = type;
-        this.name = name;
-        this.id = (id <= 0L ? fileIdCounter.getAndIncrement() : id);
-        data = new byte[MAX_FILE_SIZE];
-        attr = new Nfs3FileAttributes();
-    }
-
-    protected FsObject(FileType type, String name) {
-        this(type, name, 0L);
-    }
-
-    public void setFilename(String filename) {
-        this.name = filename;
-    }
-
-    public String getFilename() {
-        return name;
-    }
-
-    public abstract void setAttr(SetAttr3 attr);
-
-    public Nfs3FileAttributes getAttr() {
-        return attr;
-    }
-
-    public WccAttr getWccAttr() {
-        return new WccAttr(getAttr().getSize(), new NfsTime(getAttr().getMtime()), new NfsTime(getAttr().getCtime()));
-    }
-
-    public FileType getType() {
-        return type;
-    }
-
-    public long getId() {
-        return id;
-    }
-
-    @Override
-    public int compareTo(FsObject other) {
-        if(other.getType() == this.type) {
-            return (int) (this.getId() - other.getId());
-        }
-        return -1;
-    }
-
-}
diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3Filesystem.java b/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3Filesystem.java
deleted file mode 100644
index f1ab800..0000000
--- a/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3Filesystem.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- * Copyright 2014 NetApp Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.hadoop.fs.nfsv3.server; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; - -import org.apache.hadoop.fs.nfs.mount.MountMNTResponse; -import org.apache.hadoop.fs.nfsv3.server.FsObject.FileType; -import org.apache.hadoop.mount.MountResponse; -import org.apache.hadoop.nfs.NfsTime; -import org.apache.hadoop.nfs.nfs3.FileHandle; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; -import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; -import org.apache.hadoop.nfs.nfs3.Nfs3FileHandle; -import org.apache.hadoop.nfs.nfs3.Nfs3Status; -import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request; -import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; -import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request; -import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request; -import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request; -import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request; -import org.apache.hadoop.nfs.nfs3.request.READ3Request; -import org.apache.hadoop.nfs.nfs3.request.READDIR3Request; -import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request; -import org.apache.hadoop.nfs.nfs3.request.RENAME3Request; -import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request; -import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request; -import org.apache.hadoop.nfs.nfs3.request.SetAttr3; -import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; -import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; -import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; -import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response; -import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response; -import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response; -import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response; -import org.apache.hadoop.nfs.nfs3.response.READ3Response; -import org.apache.hadoop.nfs.nfs3.response.READDIR3Response; -import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.DirList3; -import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3; -import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; -import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; -import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; -import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; -import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; -import org.apache.hadoop.nfs.nfs3.response.WccAttr; -import org.apache.hadoop.nfs.nfs3.response.WccData; -import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; - -public class MockNfs3Filesystem { - - final Map contents; - final DirectoryObject root; - public static final int MOCK_FSID = 1000; - - public MockNfs3Filesystem() { - contents = new HashMap(); - root = new DirectoryObject("/"); - contents.put(root.getId(), root); - } - - public MountMNTResponse getRoot() throws IOException { - return new MountMNTResponse(MountResponse.MNT_OK, new Nfs3FileHandle(root.getId()), new int[] {AuthFlavor.AUTH_NONE.getValue()}); - } - - public CREATE3Response CREATE(CREATE3Request request) throws IOException { - FileHandle handle = request.getHandle(); - String filename = request.getName(); - int mode = request.getMode(); - - if(contents.containsKey(handle.getFileId())) { - - // Check contents of the directory - DirectoryObject dirObject; - FileObject newFile = null; - - FsObject fsObject = contents.get(handle.getFileId()); - 
if(fsObject.getType() != FileType.TYPE_DIRECTORY) { - return new CREATE3Response(Nfs3Status.NFS3ERR_NOTDIR); - } - dirObject = (DirectoryObject) fsObject; - - for(FsObject obj : dirObject.listDirectoryContents()) { - if(obj.getFilename().equals(filename)) { - if(obj instanceof FileObject) { - newFile = (FileObject) obj; - break; - } else { - return new CREATE3Response(Nfs3Status.NFS3ERR_IO); - } - } - } - - // Don't check if the file already exists - if(mode == Nfs3Constant.CREATE_UNCHECKED) { - WccAttr preOpAttr = dirObject.getWccAttr(); - - if(newFile != null) { - dirObject.removeFileFromDirectory(newFile); - contents.remove(newFile.getId()); - } - - newFile = new FileObject(filename); - newFile.setAttr(request.getObjAttr()); - dirObject.addNewFile(newFile); - contents.put(newFile.getId(), newFile); - WccData dirWcc = new WccData(preOpAttr, dirObject.getAttr()); - return new CREATE3Response(Nfs3Status.NFS3_OK, new Nfs3FileHandle(newFile.getId()), newFile.getAttr(), dirWcc); - } - // Checks if the file already exists; error otherwise - else if(mode == Nfs3Constant.CREATE_GUARDED) { - if(newFile != null) { - return new CREATE3Response(Nfs3Status.NFS3ERR_EXIST); - } - else { - WccAttr preOpAttr = dirObject.getWccAttr(); - - newFile = new FileObject(filename); - newFile.setAttr(request.getObjAttr()); - dirObject.addNewFile(newFile); - contents.put(newFile.getId(), newFile); - WccData dirWcc = new WccData(preOpAttr, dirObject.getAttr()); - return new CREATE3Response(Nfs3Status.NFS3_OK, new Nfs3FileHandle(newFile.getId()), newFile.getAttr(), dirWcc); - } - } - // TODO: Exclusive creation not supported in this test - else { - return new CREATE3Response(Nfs3Status.NFS3ERR_IO); - } - } else { - return new CREATE3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public COMMIT3Response COMMIT(COMMIT3Request request) throws IOException { - - FileHandle handle = request.getHandle(); - - if(contents.containsKey(handle.getFileId())) { - FsObject obj = contents.get(handle.getFileId()); - if(obj instanceof FileObject) { - FileObject file = (FileObject) obj; - WccAttr wccAttr = file.getWccAttr(); - return new COMMIT3Response(Nfs3Status.NFS3_OK, new WccData(wccAttr, file.getAttr()), 0L); - } else { - return new COMMIT3Response(Nfs3Status.NFS3ERR_IO); - } - } else { - return new COMMIT3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - - } - - public FSINFO3Response FSINFO(FSINFO3Request request) throws IOException { - FileHandle handle = request.getHandle(); - - if(contents.containsKey(handle.getFileId())) { - if(handle.getFileId() == root.getId()) { - int rtmax = 65536; - int rtpref = 4096; - int rtmult = 1; - int wtmax = 65536; - int wtpref = 4096; - int wtmult = 1; - int dtperf = 4096; - long maxfilesize = 10 * 1024 * 1024; - NfsTime delta = new NfsTime(1); - int properties = 0; - return new FSINFO3Response(Nfs3Status.NFS3_OK, new Nfs3FileAttributes(), - rtmax, rtpref, rtmult, wtmax, wtpref, wtmult, dtperf, maxfilesize, - delta, properties); - } else { - return new FSINFO3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } else { - return new FSINFO3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public GETATTR3Response GETATTR(GETATTR3Request request) throws IOException { - FileHandle handle = request.getHandle(); - if(contents.containsKey(handle.getFileId())) { - FsObject obj = contents.get(handle.getFileId()); - return new GETATTR3Response(Nfs3Status.NFS3_OK, obj.getAttr()); - } else { - return new GETATTR3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public LOOKUP3Response LOOKUP(LOOKUP3Request 
request) throws IOException { - FileHandle handle = request.getHandle(); - String name = request.getName(); - - if(contents.containsKey(handle.getFileId())) { - FsObject obj = contents.get(handle.getFileId()); - if(obj.getType() != FileType.TYPE_DIRECTORY) { - return new LOOKUP3Response(Nfs3Status.NFS3ERR_NOTDIR); - } else { - DirectoryObject dir = (DirectoryObject) obj; - FsObject objInDir = dir.getItemInDirectory(name); - if(objInDir == null) { - return new LOOKUP3Response(Nfs3Status.NFS3ERR_NOENT); - } else { - return new LOOKUP3Response(Nfs3Status.NFS3_OK, new Nfs3FileHandle(objInDir.getId()), objInDir.getAttr(), dir.getAttr()); - } - } - } else { - return new LOOKUP3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public MKDIR3Response MKDIR(MKDIR3Request request) throws IOException { - FileHandle handle = request.getHandle(); - String dirName = request.getName(); - SetAttr3 attr = request.getObjAttr(); - WccAttr preAttr; - - if(contents.containsKey(handle.getFileId())) { - // Find parent directory - FsObject obj = contents.get(handle.getFileId()); - assert(obj.getType() == FileType.TYPE_DIRECTORY); - - // Create new directory - DirectoryObject newDir = new DirectoryObject(dirName); - newDir.setAttr(attr); - contents.put(newDir.getId(), newDir); - - // Add to parent - DirectoryObject parentDir = (DirectoryObject) obj; - preAttr = parentDir.getWccAttr(); - parentDir.addNewDirectory(newDir); - WccData dirWcc = new WccData(preAttr, parentDir.getAttr()); - return new MKDIR3Response(Nfs3Status.NFS3_OK, new Nfs3FileHandle(newDir.getId()), newDir.getAttr(), dirWcc); - } else { - return new MKDIR3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public READ3Response READ(READ3Request request) throws IOException { - FileHandle handle = request.getHandle(); - long offset = request.getOffset(); - int count = request.getCount(); - - if(contents.containsKey(handle.getFileId())) { - FsObject fsObject = contents.get(handle.getFileId()); - - if(fsObject instanceof FileObject) { - FileObject fileObject = (FileObject) fsObject; - byte[] readData = new byte[FsObject.MAX_FILE_SIZE]; - int readBytes; - try { - readBytes = fileObject.read(offset, count, readData); - } catch(IOException exception) { - return new READ3Response(Nfs3Status.NFS3ERR_INVAL); - } - return new READ3Response(Nfs3Status.NFS3_OK, fileObject.getAttr(), readBytes, ((offset + count) >= fileObject.getSize()), ByteBuffer.wrap(readData)); - } - else { - return new READ3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } else { - return new READ3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public READDIR3Response READDIR(READDIR3Request request) throws IOException { - FileHandle handle = request.getHandle(); - long cookie = request.getCookie(); - - if(contents.containsKey(handle.getFileId())) { - FsObject fsObject = contents.get(handle.getFileId()); - if(fsObject instanceof DirectoryObject) { - DirectoryObject dirObject = (DirectoryObject) fsObject; - Set entries = new TreeSet(dirObject.listDirectoryContents()); - - int position = 0; - Entry3[] array = new Entry3[6]; - for(FsObject entry : entries) { - if(entry.getId() > cookie) { - array[position] = new Entry3(entry.getId(), entry.getFilename(), entry.getId()); - position++; - if(position >= 5) { - break; - } - } - } - - DirList3 dirList; - if(position == 0) { - dirList = new DirList3(null, true); - } else { - dirList = new DirList3(Arrays.copyOf(array, position), false); - } - - return new READDIR3Response(Nfs3Status.NFS3_OK, dirObject.getAttr(), 0L, dirList); - - } else { - 
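// The handle resolved to something other than a directory, so READDIR fails with NFS3ERR_NOTDIR.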
return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR); - } - } else { - return new READDIR3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public RENAME3Response RENAME(RENAME3Request request) throws IOException { - FileHandle fromDirHandle = request.getFromDirHandle(); - String fromName = request.getFromName(); - FileHandle toDirHandle = request.getToDirHandle(); - String toName = request.getToName(); - - if(contents.containsKey(fromDirHandle.getFileId()) && contents.containsKey(toDirHandle.getFileId())) { - DirectoryObject fsFromDir = (DirectoryObject) contents.get(fromDirHandle.getFileId()); - DirectoryObject fsToDir = (DirectoryObject) contents.get(toDirHandle.getFileId()); - - // Must exist in old directory - if(fsFromDir.getItemInDirectory(fromName) == null) { - return new RENAME3Response(Nfs3Status.NFS3ERR_NOENT); - } - - // Must not exist in new directory - if(fsToDir.getItemInDirectory(toName) != null) { - return new RENAME3Response(Nfs3Status.NFS3ERR_EXIST); - } - - // Remove it from old one - for(FsObject obj : fsFromDir.listDirectoryContents()) { - if(obj.getFilename().equals(fromName)) { - if(obj instanceof FileObject) { - FileObject file = (FileObject) obj; - WccAttr fromPreOpAttr = fsFromDir.getWccAttr(); - WccAttr toPreOpAttr = fsToDir.getWccAttr(); - fsFromDir.removeFileFromDirectory(file); - fsToDir.addNewFile(file); - return new RENAME3Response(Nfs3Status.NFS3_OK, new WccData(fromPreOpAttr, fsFromDir.getAttr()), new WccData(toPreOpAttr, fsToDir.getAttr())); - } else { - return new RENAME3Response(Nfs3Status.NFS3ERR_IO); - } - } - } - - return new RENAME3Response(Nfs3Status.NFS3ERR_NOENT); - - } else { - return new RENAME3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - - } - - public REMOVE3Response REMOVE(REMOVE3Request request) throws IOException { - FileHandle dirHandle = request.getHandle(); - String name = request.getName(); - - if(contents.containsKey(dirHandle.getFileId())) { - FsObject obj = contents.get(dirHandle.getFileId()); - if(obj instanceof DirectoryObject) { - DirectoryObject dir = (DirectoryObject) obj; - for(FsObject f : dir.listDirectoryContents()) { - if(f.getFilename().equals(name) && (f instanceof FileObject)) { - WccAttr preOpAttr = dir.getWccAttr(); - dir.removeFileFromDirectory((FileObject) f); - return new REMOVE3Response(Nfs3Status.NFS3_OK, new WccData(preOpAttr, dir.getAttr())); - } - } - return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT); - } else { - return new REMOVE3Response(Nfs3Status.NFS3ERR_NOTDIR); - } - } else { - return new REMOVE3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public RMDIR3Response RMDIR(RMDIR3Request request) throws IOException { - FileHandle dirHandle = request.getHandle(); - String name = request.getName(); - - if(contents.containsKey(dirHandle.getFileId())) { - FsObject obj = contents.get(dirHandle.getFileId()); - if(obj instanceof DirectoryObject) { - DirectoryObject dir = (DirectoryObject) obj; - for(FsObject f : dir.listDirectoryContents()) { - if(f.getFilename().equals(name) && (f instanceof DirectoryObject)) { - WccAttr preOpAttr = dir.getWccAttr(); - dir.removeDirectoryFromDirectory((DirectoryObject) f); - return new RMDIR3Response(Nfs3Status.NFS3_OK, new WccData(preOpAttr, dir.getAttr())); - } - } - return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT); - } else { - return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR); - } - } else { - return new RMDIR3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public SETATTR3Response SETATTR(SETATTR3Request request) throws IOException { - FileHandle handle 
= request.getHandle(); - SetAttr3 attr = request.getAttr(); - @SuppressWarnings("unused") - boolean check = request.isCheck(); - - if(contents.containsKey(handle.getFileId())) { - FsObject obj = contents.get(handle.getFileId()); - WccAttr preOpAttr = obj.getWccAttr(); - obj.setAttr(attr); - return new SETATTR3Response(Nfs3Status.NFS3_OK, new WccData(preOpAttr, obj.getAttr())); - } else { - return new SETATTR3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } - - public WRITE3Response WRITE(WRITE3Request request) throws IOException { - FileHandle handle = request.getHandle(); - long offset = request.getOffset(); - int count = request.getCount(); - ByteBuffer data = request.getData(); - - if(contents.containsKey(handle.getFileId())) { - FsObject fsObject = contents.get(handle.getFileId()); - if(fsObject instanceof FileObject) { - FileObject fileObject = (FileObject) fsObject; - WccAttr preOpAttr = fileObject.getWccAttr(); - try { - fileObject.write(offset, count, data); - } catch(IOException exception) { - return new WRITE3Response(Nfs3Status.NFS3ERR_FBIG); - } - WccData fileWcc = new WccData(preOpAttr, fileObject.getAttr()); - return new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, WriteStableHow.FILE_SYNC, 0L); - } - else { - return new WRITE3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - } else { - return new WRITE3Response(Nfs3Status.NFS3ERR_BADHANDLE); - } - - } - -} diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3Server.java b/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3Server.java deleted file mode 100644 index 15bfbeb..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3Server.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.nfsv3.server; - -import java.io.IOException; -import java.net.ServerSocket; -import java.net.Socket; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -public class MockNfs3Server implements Runnable { - - final int serverPort; - final ExecutorService threadPool; - final MockNfs3Filesystem filesystem; - final ServerSocket serverSocket; - final boolean broken; - - public final static Logger LOG = LoggerFactory.getLogger(MockNfs3Server.class); - - public MockNfs3Server(boolean broken, int port) throws IOException { - threadPool = Executors.newFixedThreadPool(128); - serverPort = port; - filesystem = new MockNfs3Filesystem(); - serverSocket = new ServerSocket(serverPort); - this.broken = broken; - } - - @Override - public void run() { - try { - int threadId = 1; - while(!serverSocket.isClosed()) { - Socket clientSocket = serverSocket.accept(); - LOG.info("Launched NFS handler thread " + threadId); - threadPool.execute(new MockNfs3ServerHandler(filesystem, clientSocket, broken)); - threadId++; - } - threadPool.shutdown(); - } catch(IOException exception) { - exception.printStackTrace(); - LOG.error("Got an IOException in the accept loop"); - } - } - -} diff --git a/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3ServerHandler.java b/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3ServerHandler.java deleted file mode 100644 index 2293f97..0000000 --- a/src/test/java/org/apache/hadoop/fs/nfsv3/server/MockNfs3ServerHandler.java +++ /dev/null @@ -1,267 +0,0 @@ -/** - * Copyright 2014 NetApp Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.nfsv3.server; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.IOException; -import java.net.Socket; -import java.nio.ByteBuffer; - -import org.apache.commons.lang.ArrayUtils; -import org.apache.hadoop.fs.nfs.mount.MountClient; -import org.apache.hadoop.fs.nfs.mount.MountMNTResponse; -import org.apache.hadoop.mount.MountInterface; -import org.apache.hadoop.nfs.nfs3.Nfs3Constant; -import org.apache.hadoop.nfs.nfs3.Nfs3Status; -import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request; -import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; -import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request; -import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request; -import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request; -import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request; -import org.apache.hadoop.nfs.nfs3.request.READ3Request; -import org.apache.hadoop.nfs.nfs3.request.READDIR3Request; -import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request; -import org.apache.hadoop.nfs.nfs3.request.RENAME3Request; -import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request; -import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request; -import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; -import org.apache.hadoop.nfs.nfs3.response.NFS3Response; -import org.apache.hadoop.oncrpc.RpcAcceptedReply; -import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; -import org.apache.hadoop.oncrpc.RpcCall; -import org.apache.hadoop.oncrpc.RpcDeniedReply; -import org.apache.hadoop.oncrpc.RpcDeniedReply.RejectState; -import org.apache.hadoop.oncrpc.RpcReply.ReplyState; -import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.oncrpc.security.CredentialsNone; -import org.apache.hadoop.oncrpc.security.Verifier; -import org.apache.hadoop.oncrpc.security.VerifierNone; -import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; -import org.jboss.netty.buffer.ChannelBuffer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MockNfs3ServerHandler implements Runnable { - - final Socket socket; - final MockNfs3Filesystem filesystem; - final boolean broken; - - public final static Logger LOG = LoggerFactory.getLogger(MockNfs3ServerHandler.class); - - public MockNfs3ServerHandler(MockNfs3Filesystem filesystem, Socket socket, boolean broken) throws IOException { - this.socket = socket; - this.filesystem = filesystem; - this.broken = broken; - } - - @Override - public void run() { - try { - - LOG.info("Nfs3 server handler thread is running"); - - BufferedOutputStream oos = - new BufferedOutputStream(socket.getOutputStream()); - BufferedInputStream ois = - new BufferedInputStream(socket.getInputStream()); - - while(!socket.isClosed()) { - - byte[] requestBuffer = null; - byte[] fragmentHeader = new byte[4]; - @SuppressWarnings("unused") - long totalReadBytes = 0; - - // Read an entire RPC request message - while(true) { - - LOG.info("Reading fragment header"); - // The first 4 bytes are the record header - if (ois.read(fragmentHeader, 0, 4) != 4) { - throw new IOException("Could not read fragment header"); - } - LOG.info("Finished reading fragment header"); - - // The first bit is the flag to tell whether this is the last fragment - // The left 31 bits are the length of this segment - int fragmentHeaderInt = ByteBuffer.wrap(fragmentHeader).getInt(); - int lastFragmentFlag = fragmentHeaderInt & (1 << 31); - int length = fragmentHeaderInt & (~(1 << 31)); - - // Do multiple reads since a fragment could be larger 
than the maximal - // allowed TCP packet - byte[] fragment = new byte[length]; - int readbytes = 0; - - while (readbytes < length) { - int curReadBytes = ois.read(fragment, readbytes, length - readbytes); - readbytes += curReadBytes; - LOG.info("Read " + readbytes + " of a total size of " + length); - } - - if (readbytes != length) { - throw new IOException("Did not read expected number of bytes readbytes=" + readbytes + " length=" + length); - } - totalReadBytes += readbytes; - - // Concatenate fragments together - if (requestBuffer == null) { - requestBuffer = fragment.clone(); - } else { - requestBuffer = ArrayUtils.addAll(requestBuffer, fragment); - } - - // Stop if we have reached the last fragment - if (lastFragmentFlag != 0) { - break; - } - } - - // Make the RPC Call object - XDR callXdr = new XDR(requestBuffer); - RpcCall rpcCall = RpcCall.read(callXdr); - LOG.info("Converted incoming bytes to a RPC call object"); - - // Make XDR for reply - XDR replyXdr = new XDR(); - - // Check RPC version - if(rpcCall.getRpcVersion() != RpcCall.RPC_VERSION) { - RpcDeniedReply reply = new RpcDeniedReply(rpcCall.getXid(), ReplyState.MSG_DENIED, RejectState.RPC_MISMATCH, new VerifierNone()); - reply.write(replyXdr); - } - else if(!(rpcCall.getCredential() instanceof CredentialsNone)) { - RpcDeniedReply reply = new RpcDeniedReply(rpcCall.getXid(), ReplyState.MSG_DENIED, RejectState.AUTH_ERROR, new VerifierNone()); - reply.write(replyXdr); - } - else if(rpcCall.getProgram() != Nfs3Constant.PROGRAM || rpcCall.getVersion() != Nfs3Constant.VERSION) { - // FIXME Check for portmap - // Check for MOUNT - if(rpcCall.getProgram() == MountClient.MOUNTD_PROGRAM && rpcCall.getVersion() == MountClient.MOUNTD_VERSION) { - String mountPath = callXdr.readString(); - RpcAcceptedReply reply = RpcAcceptedReply.getInstance(rpcCall.getXid(), AcceptState.SUCCESS, Verifier.VERIFIER_NONE); - reply.write(replyXdr); - // Mount response - if(mountPath.equals("/mountpath")) { - MountMNTResponse mountResponse = mount(rpcCall, callXdr); - replyXdr.writeInt(mountResponse.getStatus()); - mountResponse.getFilehandle().serialize(replyXdr); - replyXdr.writeInt(1); - replyXdr.writeInt(AuthFlavor.AUTH_NONE.getValue()); - } else { - MountMNTResponse mountResponse = mount(rpcCall, callXdr); - replyXdr.writeInt(mountResponse.getStatus()); - mountResponse.getFilehandle().serialize(replyXdr); - replyXdr.writeInt(0); - } - } else { - RpcAcceptedReply reply = RpcAcceptedReply.getInstance(rpcCall.getXid(), AcceptState.PROG_MISMATCH, Verifier.VERIFIER_NONE); - reply.write(replyXdr); - } - } - else { - if(!broken) { - LOG.info("Basics of the RPC checks out, call the RPC program to process the call"); - NFS3Response nfsResponse = handleNFS(rpcCall, callXdr); - nfsResponse.writeHeaderAndResponse(replyXdr, rpcCall.getXid(), Verifier.VERIFIER_NONE); - } else { - RpcAcceptedReply reply = RpcAcceptedReply.getInstance(rpcCall.getXid(), AcceptState.PROC_UNAVAIL, Verifier.VERIFIER_NONE); - reply.write(replyXdr); - } - } - - // Send reply to socket - ChannelBuffer buffer = XDR.writeMessageTcp(replyXdr, true); - oos.write(buffer.toByteBuffer().array()); - oos.flush(); - LOG.info("Sent reply of " + buffer.toByteBuffer().array().length + " bytes"); - } - - } catch(IOException exception) { - LOG.error("Could not handle request"); - //exception.printStackTrace(); - } - - } - - private MountMNTResponse mount(RpcCall call, XDR callXdr) throws IOException { - assert(call.getProgram() == MountClient.MOUNTD_PROGRAM); - assert(call.getVersion() == 
MountClient.MOUNTD_VERSION); - - if(call.getProcedure() == MountInterface.MNTPROC.MNT.getValue()) { - //callXdr.readString(); - return filesystem.getRoot(); - } else { - throw new IOException("Not a valid MOUNT3 procedure"); - } - - } - - private NFS3Response handleNFS(RpcCall call, XDR callXdr) throws IOException { - assert(call.getProgram() == Nfs3Constant.PROGRAM); - assert(call.getVersion() == Nfs3Constant.VERSION); - - if(call.getProcedure() == Nfs3Constant.NFSPROC3.CREATE.getValue()) { - CREATE3Request request = new CREATE3Request(callXdr); - return filesystem.CREATE(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.COMMIT.getValue()) { - COMMIT3Request request = new COMMIT3Request(callXdr); - return filesystem.COMMIT(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.NULL.getValue()) { - return new NFS3Response(Nfs3Status.NFS3_OK); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.FSINFO.getValue()) { - FSINFO3Request request = new FSINFO3Request(callXdr); - return filesystem.FSINFO(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.MKDIR.getValue()) { - MKDIR3Request request = new MKDIR3Request(callXdr); - return filesystem.MKDIR(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.LOOKUP.getValue()) { - LOOKUP3Request request = new LOOKUP3Request(callXdr); - return filesystem.LOOKUP(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.GETATTR.getValue()) { - GETATTR3Request request = new GETATTR3Request(callXdr); - return filesystem.GETATTR(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.READ.getValue()) { - READ3Request request = new READ3Request(callXdr); - return filesystem.READ(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.READDIR.getValue()) { - READDIR3Request request = new READDIR3Request(callXdr); - return filesystem.READDIR(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.RENAME.getValue()) { - RENAME3Request request = new RENAME3Request(callXdr); - return filesystem.RENAME(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.REMOVE.getValue()) { - REMOVE3Request request = new REMOVE3Request(callXdr); - return filesystem.REMOVE(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.RMDIR.getValue()) { - RMDIR3Request request = new RMDIR3Request(callXdr); - return filesystem.RMDIR(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.SETATTR.getValue()) { - SETATTR3Request request = new SETATTR3Request(callXdr); - return filesystem.SETATTR(request); - } else if(call.getProcedure() == Nfs3Constant.NFSPROC3.WRITE.getValue()) { - WRITE3Request request = new WRITE3Request(callXdr); - return filesystem.WRITE(request); - } else { - throw new IOException("Not a valid NFS3 procedure"); - } - - } - - -} diff --git a/src/test/resources/contract-test-options.xml.template b/src/test/resources/contract-test-options.xml.template deleted file mode 100644 index 916e803..0000000 --- a/src/test/resources/contract-test-options.xml.template +++ /dev/null @@ -1,24 +0,0 @@ - - - - - fs.defaultFS - nfs://localhost:2049/ - - - fs.nfs.mountdir - /exporteddir/ - - - fs.nfs.impl - org.apache.hadoop.fs.nfs.NFSv3FileSystem - - - fs.AbstractFileSystem.nfs.impl - org.apache.hadoop.fs.nfs.NFSv3AbstractFilesystem - - - fs.nfs.auth.flavor - AUTH_NONE - - diff --git a/src/test/resources/contract/nfs-contract.xml b/src/test/resources/contract/nfs-contract.xml new file mode 100644 index 0000000..0db104c --- /dev/null +++ 
b/src/test/resources/contract/nfs-contract.xml @@ -0,0 +1,94 @@ + + + + + + + fs.contract.test.root-tests-enabled + true + + + + fs.file.contract.test.random-seek-count + 500 + + + + fs.contract.is-case-sensitive + true + + + + fs.contract.supports-append + true + + + + fs.contract.supports-atomic-directory-delete + true + + + + fs.contract.supports-atomic-rename + true + + + + fs.contract.supports-block-locality + true + + + + fs.contract.supports-concat + true + + + + fs.contract.supports-seek + true + + + + fs.contract.rejects-seek-past-eof + true + + + + fs.contract.supports-strict-exceptions + true + + + + fs.contract.supports-unix-permissions + true + + + + fs.contract.rename-returns-false-if-dest-exists + true + + + + fs.contract.rename-returns-false-if-source-missing + true + + + \ No newline at end of file diff --git a/src/test/resources/nfs-test-mapping.json.template b/src/test/resources/nfs-test-mapping.json.template new file mode 100644 index 0000000..e034151 --- /dev/null +++ b/src/test/resources/nfs-test-mapping.json.template @@ -0,0 +1,62 @@ +{ + "spaces": [ + { + "name": "laptop", + "uri": "nfs://localhost:2049/", + "options": { + "nfsExportPath": "/Volumes/export1", + "nfsReadSizeBits": 20, + "nfsWriteSizeBits": 20, + "nfsSplitSizeBits": 28, + "nfsAuthScheme": "AUTH_SYS", + "nfsUsername": "root", + "nfsGroupname": "root", + "nfsUid": 0, + "nfsGid": 0, + "nfsPort": 2049, + "nfsMountPort": -1, + "nfsRpcbindPort": 111 + }, + "endpoints": [ + { + "host": "nfs://localhost:2049", + "exportPath": "/Volumes/export2", + "path": "/export2" + }, + { + "host": "nfs://127.0.0.1:2049", + "exportPath": "/Volumes/export2", + "path": "/export2" + } + ] + }, + { + "name": "one-svm-two-junctions", + "uri": "nfs://primary-ip-address:2049/", + "options": { + "nfsExportPath": "/", + "nfsReadSizeBits": 20, + "nfsWriteSizeBits": 20, + "nfsSplitSizeBits": 28, + "nfsAuthScheme": "AUTH_SYS", + "nfsUsername": "root", + "nfsGroupname": "root", + "nfsUid": 0, + "nfsGid": 0, + "nfsPort": 2049, + "nfsMountPort": -1, + "nfsRpcbindPort": 111 + }, + "endpoints": [ + { + "host": "nfs://ip-of-controller-one:2049/", + "path": "/junctioned-volume-one/" + }, + { + "host": "nfs://ip-of-controller-two:2049/", + "path": "/junctioned-volume-two/" + } + ] + } + ] +} diff --git a/src/test/resources/nfs-test-options.xml.template b/src/test/resources/nfs-test-options.xml.template new file mode 100644 index 0000000..766f54b --- /dev/null +++ b/src/test/resources/nfs-test-options.xml.template @@ -0,0 +1,40 @@ + + + + + + + + + fs.contract.test.fs.nfs + nfs://localhost:2049/ + + + fs.defaultFS + nfs://localhost:2049/ + + + fs.nfs.configuration + nfs-test-mapping.json + + + fs.nfs.impl + org.apache.hadoop.fs.nfs.NFSv3FileSystem + + + fs.AbstractFileSystem.nfs.impl + org.apache.hadoop.fs.nfs.NFSv3AbstractFilesystem +
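For reference, a minimal smoke-test sketch (not part of this patch; the class name, the localhost export, and the listed path are illustrative assumptions) showing how the properties in nfs-test-options.xml.template wire the connector into the standard Hadoop FileSystem API:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NfsConnectorSmokeTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Mirror the template values; point these at a real NFSv3 export.
        conf.set("fs.defaultFS", "nfs://localhost:2049/");
        conf.set("fs.nfs.configuration", "nfs-test-mapping.json");
        conf.set("fs.nfs.impl", "org.apache.hadoop.fs.nfs.NFSv3FileSystem");
        conf.set("fs.AbstractFileSystem.nfs.impl",
                 "org.apache.hadoop.fs.nfs.NFSv3AbstractFilesystem");

        // FileSystem.get() instantiates NFSv3FileSystem through the fs.nfs.impl binding.
        FileSystem fs = FileSystem.get(URI.create("nfs://localhost:2049/"), conf);
        for (FileStatus status : fs.listStatus(new Path("/"))) {
            System.out.println(status.getPath());
        }
        fs.close();
    }
}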