Skip to content

Commit

Permalink
[apache#5731] feat(auth-ranger): RangerAuthorizationHDFSPlugin suppor…
Browse files Browse the repository at this point in the history
…ts Fileset authorization (apache#5733)

### What changes were proposed in this pull request?

RangerAuthorizationHDFSPlugin supports Fileset authorization

### Why are the changes needed?

Fix: apache#5731

### Does this PR introduce _any_ user-facing change?

Yes — additional property keys are introduced on Fileset.

### How was this patch tested?

ITs

---------

Co-authored-by: theoryxu <theoryxu@tencent.com>
  • Loading branch information
theoryxu and theoryxu authored Dec 13, 2024
1 parent 5673764 commit b151461
Show file tree
Hide file tree
Showing 19 changed files with 1,376 additions and 151 deletions.
2 changes: 1 addition & 1 deletion authorizations/authorization-ranger/build.gradle.kts
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ tasks.test {
doFirst {
environment("HADOOP_USER_NAME", "gravitino")
}
dependsOn(":catalogs:catalog-hive:jar", ":catalogs:catalog-hive:runtimeJars", ":catalogs:catalog-lakehouse-iceberg:jar", ":catalogs:catalog-lakehouse-iceberg:runtimeJars", ":catalogs:catalog-lakehouse-paimon:jar", ":catalogs:catalog-lakehouse-paimon:runtimeJars")
dependsOn(":catalogs:catalog-hive:jar", ":catalogs:catalog-hive:runtimeJars", ":catalogs:catalog-lakehouse-iceberg:jar", ":catalogs:catalog-lakehouse-iceberg:runtimeJars", ":catalogs:catalog-lakehouse-paimon:jar", ":catalogs:catalog-lakehouse-paimon:runtimeJars", ":catalogs:catalog-hadoop:jar", ":catalogs:catalog-hadoop:runtimeJars")

val skipITs = project.hasProperty("skipITs")
if (skipITs) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ protected AuthorizationPlugin newPlugin(
case "lakehouse-iceberg":
case "lakehouse-paimon":
return RangerAuthorizationHadoopSQLPlugin.getInstance(metalake, config);
case "hadoop":
return RangerAuthorizationHDFSPlugin.getInstance(metalake, config);
default:
throw new IllegalArgumentException("Unknown catalog provider: " + catalogProvider);
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,252 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.gravitino.authorization.ranger;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.gravitino.GravitinoEnv;
import org.apache.gravitino.MetadataObject;
import org.apache.gravitino.NameIdentifier;
import org.apache.gravitino.authorization.AuthorizationMetadataObject;
import org.apache.gravitino.authorization.AuthorizationPrivilege;
import org.apache.gravitino.authorization.AuthorizationSecurableObject;
import org.apache.gravitino.authorization.Privilege;
import org.apache.gravitino.authorization.SecurableObject;
import org.apache.gravitino.authorization.SecurableObjects;
import org.apache.gravitino.authorization.ranger.reference.RangerDefines;
import org.apache.gravitino.catalog.FilesetDispatcher;
import org.apache.gravitino.exceptions.AuthorizationPluginException;
import org.apache.gravitino.file.Fileset;
import org.apache.ranger.plugin.model.RangerPolicy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Ranger authorization plugin that maps Gravitino Fileset privileges onto HDFS
 * path-based Ranger policies.
 *
 * <p>Gravitino {@code READ_FILESET}/{@code WRITE_FILESET} privileges and fileset
 * ownership are translated into Ranger HDFS {@code read}/{@code write}/{@code execute}
 * permissions on the fileset's storage-location path.
 */
public class RangerAuthorizationHDFSPlugin extends RangerAuthorizationPlugin {
  private static final Logger LOG = LoggerFactory.getLogger(RangerAuthorizationHDFSPlugin.class);

  // Matches the scheme+authority prefix of an HDFS location (e.g. "hdfs://nn:8020");
  // it is stripped so Ranger policies use only the absolute path component.
  private static final Pattern pattern = Pattern.compile("^hdfs://[^/]*");

  private static volatile RangerAuthorizationHDFSPlugin instance = null;

  private RangerAuthorizationHDFSPlugin(String metalake, Map<String, String> config) {
    super(metalake, config);
  }

  /**
   * Returns the lazily-initialized singleton instance of this plugin.
   *
   * <p>The method itself is {@code synchronized}, which already guarantees at-most-once
   * construction; the previous inner double-checked locking block was redundant and,
   * worse, synchronized on {@code RangerAuthorizationHadoopSQLPlugin.class} (a
   * copy-paste mistake), so it has been removed.
   *
   * @param metalake the metalake name the plugin is bound to (used only on first call)
   * @param config the authorization plugin configuration (used only on first call)
   * @return the shared plugin instance
   */
  public static synchronized RangerAuthorizationHDFSPlugin getInstance(
      String metalake, Map<String, String> config) {
    if (instance == null) {
      instance = new RangerAuthorizationHDFSPlugin(metalake, config);
    }
    return instance;
  }

  /**
   * Maps Gravitino fileset privileges to the Ranger HDFS privileges they require.
   * READ needs read+execute on the path; WRITE needs write+execute.
   */
  @Override
  public Map<Privilege.Name, Set<AuthorizationPrivilege>> privilegesMappingRule() {
    return ImmutableMap.of(
        Privilege.Name.READ_FILESET,
        ImmutableSet.of(
            RangerPrivileges.RangerHdfsPrivilege.READ,
            RangerPrivileges.RangerHdfsPrivilege.EXECUTE),
        Privilege.Name.WRITE_FILESET,
        ImmutableSet.of(
            RangerPrivileges.RangerHdfsPrivilege.WRITE,
            RangerPrivileges.RangerHdfsPrivilege.EXECUTE));
  }

  /** An owner of a fileset gets full read/write/execute access to its path. */
  @Override
  public Set<AuthorizationPrivilege> ownerMappingRule() {
    return ImmutableSet.of(
        RangerPrivileges.RangerHdfsPrivilege.READ,
        RangerPrivileges.RangerHdfsPrivilege.WRITE,
        RangerPrivileges.RangerHdfsPrivilege.EXECUTE);
  }

  /** Ranger HDFS policies are keyed by a single resource: the path. */
  @Override
  public List<String> policyResourceDefinesRule() {
    return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
  }

  /**
   * Builds a new Ranger policy whose single path resource is the metadata object's
   * first name. The resource is created with excludes=false and recursive=true so the
   * policy covers the whole subtree under the fileset location.
   */
  @Override
  protected RangerPolicy createPolicyAddResources(AuthorizationMetadataObject metadataObject) {
    RangerPolicy policy = new RangerPolicy();
    policy.setService(rangerServiceName);
    policy.setName(metadataObject.fullName());
    RangerPolicy.RangerPolicyResource policyResource =
        new RangerPolicy.RangerPolicyResource(metadataObject.names().get(0), false, true);
    policy.getResources().put(RangerDefines.PolicyResource.PATH.getName(), policyResource);
    return policy;
  }

  /**
   * Wraps a path name and privilege set into a validated path-based securable object.
   *
   * @param names the hierarchical names; only the last one is used as the path
   * @param type the path-based metadata object type
   * @param privileges the Ranger privileges to attach
   * @return the securable object for Ranger policy generation
   */
  @Override
  public AuthorizationSecurableObject generateAuthorizationSecurableObject(
      List<String> names,
      AuthorizationMetadataObject.Type type,
      Set<AuthorizationPrivilege> privileges) {
    AuthorizationMetadataObject authMetadataObject =
        new RangerPathBaseMetadataObject(AuthorizationMetadataObject.getLastName(names), type);
    authMetadataObject.validateAuthorizationMetadataObject();
    return new RangerPathBaseSecurableObject(
        authMetadataObject.name(), authMetadataObject.type(), privileges);
  }

  /** Gravitino privileges this plugin accepts (CREATE_FILESET is accepted but ignored). */
  @Override
  public Set<Privilege.Name> allowPrivilegesRule() {
    return ImmutableSet.of(
        Privilege.Name.CREATE_FILESET, Privilege.Name.READ_FILESET, Privilege.Name.WRITE_FILESET);
  }

  /** Metadata object types this plugin can translate. */
  @Override
  public Set<MetadataObject.Type> allowMetadataObjectTypesRule() {
    return ImmutableSet.of(
        MetadataObject.Type.FILESET,
        MetadataObject.Type.SCHEMA,
        MetadataObject.Type.CATALOG,
        MetadataObject.Type.METALAKE);
  }

  /**
   * Translates a Gravitino securable object's privileges into Ranger path securable
   * objects. Only READ_FILESET/WRITE_FILESET on a FILESET produce policies; the same
   * privileges on METALAKE/CATALOG/SCHEMA are silently skipped, CREATE_FILESET is
   * deliberately ignored, and anything else is logged as unsupported.
   *
   * @param securableObject the Gravitino securable object with its privileges
   * @return the Ranger securable objects to enforce (possibly empty)
   * @throws AuthorizationPluginException if a fileset privilege targets an unexpected type
   */
  @Override
  public List<AuthorizationSecurableObject> translatePrivilege(SecurableObject securableObject) {
    List<AuthorizationSecurableObject> rangerSecurableObjects = new ArrayList<>();

    securableObject.privileges().stream()
        .filter(Objects::nonNull)
        .forEach(
            gravitinoPrivilege -> {
              Set<AuthorizationPrivilege> rangerPrivileges = new HashSet<>();
              // Ignore unsupported privileges
              if (!privilegesMappingRule().containsKey(gravitinoPrivilege.name())) {
                return;
              }
              // NOTE(review): RangerHivePrivilegeImpl is reused here as the generic
              // privilege wrapper even though this is the HDFS plugin — confirm this
              // is intentional upstream rather than a leftover from the SQL plugin.
              privilegesMappingRule().get(gravitinoPrivilege.name()).stream()
                  .forEach(
                      rangerPrivilege ->
                          rangerPrivileges.add(
                              new RangerPrivileges.RangerHivePrivilegeImpl(
                                  rangerPrivilege, gravitinoPrivilege.condition())));

              switch (gravitinoPrivilege.name()) {
                case CREATE_FILESET:
                  // Ignore the Gravitino privilege `CREATE_FILESET` in the
                  // RangerAuthorizationHDFSPlugin
                  break;
                case READ_FILESET:
                case WRITE_FILESET:
                  switch (securableObject.type()) {
                    case METALAKE:
                    case CATALOG:
                    case SCHEMA:
                      // No concrete path to protect at these levels.
                      break;
                    case FILESET:
                      rangerSecurableObjects.add(
                          generateAuthorizationSecurableObject(
                              translateMetadataObject(securableObject).names(),
                              RangerPathBaseMetadataObject.Type.PATH,
                              rangerPrivileges));
                      break;
                    default:
                      throw new AuthorizationPluginException(
                          "The privilege %s is not supported for the securable object: %s",
                          gravitinoPrivilege.name(), securableObject.type());
                  }
                  break;
                default:
                  LOG.warn(
                      "RangerAuthorizationHDFSPlugin -> privilege {} is not supported for the securable object: {}",
                      gravitinoPrivilege.name(),
                      securableObject.type());
              }
            });

    return rangerSecurableObjects;
  }

  /**
   * Translates ownership of a Gravitino metadata object into Ranger path securable
   * objects carrying the full owner privilege set. Ownership of METALAKE/CATALOG/SCHEMA
   * yields nothing; only FILESET ownership produces a path policy.
   *
   * @param gravitinoMetadataObject the owned metadata object
   * @return the Ranger securable objects for the owner (possibly empty)
   * @throws AuthorizationPluginException for unsupported metadata object types
   */
  @Override
  public List<AuthorizationSecurableObject> translateOwner(MetadataObject gravitinoMetadataObject) {
    List<AuthorizationSecurableObject> rangerSecurableObjects = new ArrayList<>();
    switch (gravitinoMetadataObject.type()) {
      case METALAKE:
      case CATALOG:
      case SCHEMA:
        return rangerSecurableObjects;
      case FILESET:
        rangerSecurableObjects.add(
            generateAuthorizationSecurableObject(
                translateMetadataObject(gravitinoMetadataObject).names(),
                RangerPathBaseMetadataObject.Type.PATH,
                ownerMappingRule()));
        break;
      default:
        throw new AuthorizationPluginException(
            "The owner privilege is not supported for the securable object: %s",
            gravitinoMetadataObject.type());
    }

    return rangerSecurableObjects;
  }

  /**
   * Converts a Gravitino metadata object into a path-based Ranger metadata object.
   * A FILESET resolves to its storage-location path; every other allowed type maps to
   * an empty-path placeholder (no policy is created for it).
   *
   * @param metadataObject the Gravitino metadata object
   * @return the path-based Ranger metadata object
   */
  @Override
  public AuthorizationMetadataObject translateMetadataObject(MetadataObject metadataObject) {
    Preconditions.checkArgument(
        allowMetadataObjectTypesRule().contains(metadataObject.type()),
        // Guava formats the message lazily, only when the check fails.
        "The metadata object type %s is not supported in the RangerAuthorizationHDFSPlugin",
        metadataObject.type());
    List<String> nsMetadataObject =
        Lists.newArrayList(SecurableObjects.DOT_SPLITTER.splitToList(metadataObject.fullName()));
    Preconditions.checkArgument(
        !nsMetadataObject.isEmpty(), "The metadata object must have at least one name.");

    if (metadataObject.type() == MetadataObject.Type.FILESET) {
      RangerPathBaseMetadataObject rangerHDFSMetadataObject =
          new RangerPathBaseMetadataObject(
              getFileSetPath(metadataObject), RangerPathBaseMetadataObject.Type.PATH);
      rangerHDFSMetadataObject.validateAuthorizationMetadataObject();
      return rangerHDFSMetadataObject;
    } else {
      return new RangerPathBaseMetadataObject("", RangerPathBaseMetadataObject.Type.PATH);
    }
  }

  /**
   * Loads the fileset behind {@code metadataObject} and returns its storage location
   * with any leading {@code hdfs://authority} prefix stripped.
   *
   * <p>NOTE(review): only the {@code hdfs://} scheme is stripped; locations using other
   * schemes (e.g. {@code file://}, {@code s3a://}) are returned unchanged — confirm
   * that is the intended behavior for non-HDFS-backed filesets.
   *
   * @param metadataObject a FILESET metadata object
   * @return the fileset's path component
   * @throws IllegalArgumentException if the fileset or its location cannot be resolved
   */
  public String getFileSetPath(MetadataObject metadataObject) {
    FilesetDispatcher filesetDispatcher = GravitinoEnv.getInstance().filesetDispatcher();
    NameIdentifier identifier =
        NameIdentifier.parse(String.format("%s.%s", metalake, metadataObject.fullName()));
    Fileset fileset = filesetDispatcher.loadFileset(identifier);
    Preconditions.checkArgument(fileset != null, "Fileset %s is not found", identifier);
    String filesetLocation = fileset.storageLocation();
    Preconditions.checkArgument(
        filesetLocation != null, "Fileset %s location is not found", identifier);
    return pattern.matcher(filesetLocation).replaceAll("");
  }
}
Loading

0 comments on commit b151461

Please sign in to comment.