Question:

Granting permission on an HDFS target directory using the Java API

李胤
2023-03-14

When I put data into HDFS using the Java API, I have to grant permission on the HDFS target directory. I tried the following code:

import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URL;
import java.util.Scanner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import java.io.IOException;

public class MovetoHdfs {

    public static void main(String[] args)throws Exception {
        MovetoHdfs m=new MovetoHdfs();
        Configuration conf1=new Configuration();
        //Scanner s=new Scanner(System.in);
        BufferedReader br=new BufferedReader(new InputStreamReader(System.in));

        System.out.println("Enter the input path");
        String inp=br.readLine();

        System.out.println("Enter output path");

        String opt=br.readLine();

        //m.moveFile(conf1,inp,opt);

        conf1.set("fs.default.name", "hdfs://10.2.152.113:8020");
        conf1.set("dfs.permissions.enabled", "false");
        conf1.set("dfs.permissions", "false");

        FileSystem fs = FileSystem.get(conf1);
        m.moveFile(conf1, inp, opt);
    }

    public void moveFile(Configuration conf,final String inputFile, final String outputFile) throws Exception {

        FileSystem fs=null;
        conf=new Configuration();
        conf.set("fs.default.name", "hdfs://10.2.152.113:8020");
        conf.set("dfs.permissions.enabled", "false");
        conf.set("dfs.permissions", "false");
        Path src=new Path(inputFile);

        Path dest=new Path(outputFile);

        fs=FileSystem.get(conf);
        //fs.setPermission(dest, fp);
        fs.moveFromLocalFile(src, dest);
        fs.close();

    }

    public void moveFile(final String inputFile, final String outputFile) throws Exception {
        Configuration conf;

        conf=new Configuration();

        Path src=new Path(inputFile);

        Path dest=new Path(outputFile);

        FileSystem dfs=FileSystem.get(new URI("hdfs://10.2.152.113:8020"),conf);
        conf.set("dfs.permissions.enabled", "false");
        conf.set("dfs.permissions", "false");
        dfs.moveFromLocalFile(src, dest);
        dfs.close();
    }
}

I get the following error:

log4j:WARN No appenders could be found for logger (org.apache.hadoop.conf.Configuration.deprecation).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
org.apache.hadoop.security.AccessControlException: Permission denied: user=Srinivas.budida, access=WRITE, inode="/user/hdfs/Test":root:hadoop:-rw-r--r--
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:257)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:238)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:151)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:138)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6286)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6268)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPathAccess(FSNamesystem.java:6193)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2621)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2545)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2430)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:551)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.create(AuthorizationProviderProxyClientProtocol.java:108)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:388)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:587)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1026)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)

    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
    at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:1603)
    at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1461)
    at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1386)
    at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:394)
    at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:390)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:390)
    at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:334)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:906)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:887)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:784)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:365)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:338)
    at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1903)
    at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1871)
    at org.apache.hadoop.fs.FileSystem.moveFromLocalFile(FileSystem.java:1858)
    at com.solix.bigdata.MovetoHdfs.moveFile(MovetoHdfs.java:105)
    at com.solix.bigdata.MovetoHdfs.main(MovetoHdfs.java:46)
Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=Srinivas.budida, access=WRITE, inode="/user/hdfs/Test":root:hadoop:-rw-r--r--
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:257)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:238)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:151)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:138)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6286)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6268)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPathAccess(FSNamesystem.java:6193)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2621)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2545)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2430)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:551)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.create(AuthorizationProviderProxyClientProtocol.java:108)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:388)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:587)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1026)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)

    at org.apache.hadoop.ipc.Client.call(Client.java:1409)
    at org.apache.hadoop.ipc.Client.call(Client.java:1362)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
    at com.sun.proxy.$Proxy9.create(Unknown Source)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:186)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
    at com.sun.proxy.$Proxy9.create(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.create(ClientNamenodeProtocolTranslatorPB.java:258)
    at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:1599)
    ... 17 more

1 answer in total

洪博涛
2023-03-14

Use the code snippet below. It is a simple example of creating a directory (and copying a file) on HDFS; use the dfs object.

import java.io.IOException;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class testinghdfs {

    public static void main(String[] args)
            throws IOException, URISyntaxException, InterruptedException {
        try {
            // Run the HDFS calls as the "hdfs" superuser instead of the local OS user.
            UserGroupInformation ugi = UserGroupInformation.createRemoteUser("hdfs");

            ugi.doAs(new PrivilegedExceptionAction<Void>() {

                public Void run() throws Exception {

                    Configuration config = new Configuration();
                    config.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
                    config.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));

                    config.set("fs.hdfs.impl",
                            org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
                    config.set("fs.file.impl",
                            org.apache.hadoop.fs.LocalFileSystem.class.getName());

                    FileSystem dfs = FileSystem.get(config);

                    // Example of creating a directory under the remote user's working directory.
                    String dirName = "TestDirectory";
                    System.out.println(dfs.getWorkingDirectory() + "\n\n");
                    Path src = new Path(dfs.getWorkingDirectory() + "/" + dirName);
                    // dfs.mkdirs(src);

                    // Copy a local file into HDFS as the "hdfs" user.
                    dfs.copyFromLocalFile(new Path("/home/inputs/TMPAAA022389.RPT"),
                            new Path("/user/hdfs/test"));

                    return null;
                }
            });
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}