Question:

Unable to connect Spark to Cloudant

夏侯衡
2023-03-14

Below is my Java code:

package spark.cloudant.connecter;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.SQLContext;
import com.cloudant.spark.*;

public class cloudantconnecter {
    public static void main(String[] args) throws Exception {

        try {
            SparkConf sparkConf = new SparkConf().setAppName("spark cloudant connecter").setMaster("local[*]");
            sparkConf.set("spark.streaming.concurrentJobs", "30");

            JavaSparkContext sc = new JavaSparkContext(sparkConf);

            SQLContext sqlContext = new SQLContext(sc);
            System.out.print("initialization successfully");


            Dataset<org.apache.spark.sql.Row> st = sqlContext.read().format("com.cloudant.spark")
                    .option("cloudant.host", "HOSTNAME").option("cloudant.username", "USERNAME")
                    .option("cloudant.password", "PASSWORD").load("DATABASENAME");

            st.printSchema();


        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

Maven dependencies

<dependencies>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.10</artifactId>
        <version>2.0.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-mllib_2.10</artifactId>
        <version>2.0.0</version>
    </dependency>
    <dependency>
        <groupId>cloudant-labs</groupId>
        <artifactId>spark-cloudant</artifactId>
        <version>2.0.0-s_2.11</version>
    </dependency>
</dependencies>

I get the following error details:

Exception in thread "main" java.lang.NoSuchMethodError: scala/Predef$.ArrowAssoc(Ljava/lang/Object;)Ljava/lang/Object; (loaded from file:/C:/Users/Administrator/.m2/repository/org/scala-lang/scala-library/2.10.6/scala-library-2.10.6.jar by sun.misc.Launcher$AppClassLoader@9f916f97) called from class scalaj.http.HttpConstants$ (loaded from file:/C:/Users/Administrator/.m2/repository/org/scalaj/scalaj-http_2.11/2.3.0/scalaj-http_2.11-2.3.0.jar by sun.misc.Launcher$AppClassLoader@9f916f97).
    at scalaj.http.HttpConstants$.liftedTree1$1(Http.scala:637)
    at scalaj.http.HttpConstants$.<init>(Http.scala:636)
    at scalaj.http.HttpConstants$.<clinit>(Http.scala)
    at scalaj.http.BaseHttp$.$lessinit$greater$default$2(Http.scala:754)
    at scalaj.http.Http$.<init>(Http.scala:738)
    at scalaj.http.Http$.<clinit>(Http.scala)
    at com.cloudant.spark.common.JsonStoreDataAccess.getQueryResult(JsonStoreDataAccess.scala:152)
    at com.cloudant.spark.common.JsonStoreDataAccess.getTotalRows(JsonStoreDataAccess.scala:99)
    at com.cloudant.spark.common.JsonStoreRDD.totalRows$lzycompute(JsonStoreRDD.scala:56)
    at com.cloudant.spark.common.JsonStoreRDD.totalRows(JsonStoreRDD.scala:55)
    at com.cloudant.spark.common.JsonStoreRDD.totalPartition$lzycompute(JsonStoreRDD.scala:59)
    at com.cloudant.spark.common.JsonStoreRDD.totalPartition(JsonStoreRDD.scala:58)
    at com.cloudant.spark.common.JsonStoreRDD.getPartitions(JsonStoreRDD.scala:81)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:248)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:246)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:246)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1934)
    at org.apache.spark.rdd.RDD$$anonfun$fold$1.apply(RDD.scala:1046)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:358)
    at org.apache.spark.rdd.RDD.fold(RDD.scala:1040)
    at org.apache.spark.sql.execution.datasources.json.InferSchema$.infer(InferSchema.scala:68)
    at org.apache.spark.sql.DataFrameReader$$anonfun$3.apply(DataFrameReader.scala:317)
    at org.apache.spark.sql.DataFrameReader$$anonfun$3.apply(DataFrameReader.scala:317)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.sql.DataFrameReader.json(DataFrameReader.scala:316)
    at com.cloudant.spark.DefaultSource.create(DefaultSource.scala:127)
    at com.cloudant.spark.DefaultSource.createRelation(DefaultSource.scala:105)
    at com.cloudant.spark.DefaultSource.createRelation(DefaultSource.scala:100)
    at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:315)
    at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:149)
    at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:132)
    at spark.cloudant.connecter.cloudantconnecter.main(cloudantconnecter.java:24)

1 answer

富锦
2023-03-14

The error is thrown because the Spark artifacts in the POM are built against Scala 2.10, while the spark-cloudant package is built against Scala 2.11.

So change the spark-core_2.10 (and spark-mllib_2.10) artifacts to their _2.11 counterparts.

The dependencies then become:

<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.0.1</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-mllib_2.11</artifactId>
    <version>2.0.1</version>
</dependency>
<dependency>
    <groupId>cloudant-labs</groupId>
    <artifactId>spark-cloudant</artifactId>
    <version>2.0.0-s_2.11</version>
</dependency>
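
After aligning the versions, it can help to confirm that only one Scala binary version actually ends up on the classpath. Below is a minimal sketch for that check; the ScalaVersionCheck class is hypothetical (not part of the project above) and only assumes that scala-library is on the classpath, which is always the case for a Spark application:

public class ScalaVersionCheck {
    public static void main(String[] args) {
        // versionNumberString() returns e.g. "2.11.8"; it has to start with
        // "2.11" to match the _2.11 suffix of the artifacts above.
        System.out.println("Scala version: " + scala.util.Properties.versionNumberString());

        // Location of the scala-library jar that was actually loaded, which
        // helps spot a leftover 2.10.x jar when two versions are on the classpath.
        System.out.println("Loaded from:   "
                + scala.Option.class.getProtectionDomain().getCodeSource().getLocation());
    }
}

If the printed version is anything other than 2.11.x, running mvn dependency:tree shows which dependency is still pulling in the 2.10 scala-library so it can be excluded.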