Spark and Cassandra Java application: Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/spark/sql/Dataset

I've got a surprisingly simple Java application, which I almost copied from this example: http://markmail.org/download.xqy?id=zua6upabiylzeetp&number=2
All I wanted to do was read the table data and display it in the Eclipse console.
My pom.xml:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>chat_connaction_test</groupId>
    <artifactId>ChatSparkConnectionTest</artifactId>
    <version>0.0.1-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>com.datastax.cassandra</groupId>
            <artifactId>cassandra-driver-core</artifactId>
            <version>3.1.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.10</artifactId>
            <version>2.0.0</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/com.datastax.spark/spark-cassandra-connector_2.10 -->
        <dependency>
            <groupId>com.datastax.spark</groupId>
            <artifactId>spark-cassandra-connector_2.10</artifactId>
            <version>2.0.0-M3</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.spark/spark-streaming_2.10 -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.10</artifactId>
            <version>2.0.0</version>
        </dependency>

        <!--
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.10</artifactId>
            <version>1.5.2</version>
        </dependency>
        -->
    </dependencies>
</project>
And my Java code:
package com.chatSparkConnactionTest;

import static com.datastax.spark.connector.japi.CassandraJavaUtil.javaFunctions;

import java.io.Serializable;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;

import com.datastax.spark.connector.japi.CassandraRow;

public class JavaDemo implements Serializable {
    private static final long serialVersionUID = 1L;

    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
            .setAppName("chat")
            .setMaster("local")
            .set("spark.executor.memory", "1g")
            .set("spark.cassandra.connection.host", "127.0.0.1");
        JavaSparkContext sc = new JavaSparkContext(conf);

        JavaRDD<String> cassandraRowsRDD = javaFunctions(sc)
            .cassandraTable("chat", "dictionary")
            .map(new Function<CassandraRow, String>() {
                @Override
                public String call(CassandraRow cassandraRow) throws Exception {
                    String tempResult = cassandraRow.toString();
                    System.out.println(tempResult);
                    return tempResult;
                }
            });

        System.out.println("Data as CassandraRows: \n" +
            cassandraRowsRDD.collect().size()); // THIS IS A LINE WITH ERROR
    }
}
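(As an aside, unrelated to the exception below: collect().size() ships every row to the driver just to count it. A minimal sketch of the cheaper equivalent, using the cassandraRowsRDD defined above:

    // Counts rows on the executors instead of materializing them all on the driver.
    long rowCount = cassandraRowsRDD.count();
    System.out.println("Data as CassandraRows: \n" + rowCount);
)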
And here is my error:
16/10/05 20:49:18 INFO CassandraConnector: Connected to Cassandra cluster: Test Cluster
Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/spark/sql/Dataset
    at java.lang.Class.getDeclaredMethods0(Native Method)
    at java.lang.Class.privateGetDeclaredMethods(Unknown Source)
    at java.lang.Class.getDeclaredMethod(Unknown Source)
    at java.io.ObjectStreamClass.getPrivateMethod(Unknown Source)
    at java.io.ObjectStreamClass.access$1700(Unknown Source)
    at java.io.ObjectStreamClass$2.run(Unknown Source)
    at java.io.ObjectStreamClass$2.run(Unknown Source)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.io.ObjectStreamClass.<init>(Unknown Source)
    at java.io.ObjectStreamClass.lookup(Unknown Source)
    at java.io.ObjectOutputStream.writeObject0(Unknown Source)
    at java.io.ObjectOutputStream.defaultWriteFields(Unknown Source)
    at java.io.ObjectOutputStream.writeSerialData(Unknown Source)
    at java.io.ObjectOutputStream.writeOrdinaryObject(Unknown Source)
    at java.io.ObjectOutputStream.writeObject0(Unknown Source)
    at java.io.ObjectOutputStream.defaultWriteFields(Unknown Source)
    at java.io.ObjectOutputStream.writeSerialData(Unknown Source)
    at java.io.ObjectOutputStream.writeOrdinaryObject(Unknown Source)
    at java.io.ObjectOutputStream.writeObject0(Unknown Source)
    at java.io.ObjectOutputStream.writeObject(Unknown Source)
    at scala.collection.immutable.$colon$colon.writeObject(List.scala:379)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
    at java.lang.reflect.Method.invoke(Unknown Source)
    at java.io.ObjectStreamClass.invokeWriteObject(Unknown Source)
    at java.io.ObjectOutputStream.writeSerialData(Unknown Source)
    at java.io.ObjectOutputStream.writeOrdinaryObject(Unknown Source)
    at java.io.ObjectOutputStream.writeObject0(Unknown Source)
    at java.io.ObjectOutputStream.defaultWriteFields(Unknown Source)
    at java.io.ObjectOutputStream.writeSerialData(Unknown Source)
    at java.io.ObjectOutputStream.writeOrdinaryObject(Unknown Source)
    at java.io.ObjectOutputStream.writeObject0(Unknown Source)
    at java.io.ObjectOutputStream.defaultWriteFields(Unknown Source)
    at java.io.ObjectOutputStream.writeSerialData(Unknown Source)
    at java.io.ObjectOutputStream.writeOrdinaryObject(Unknown Source)
    at java.io.ObjectOutputStream.writeObject0(Unknown Source)
    at java.io.ObjectOutputStream.defaultWriteFields(Unknown Source)
    at java.io.ObjectOutputStream.writeSerialData(Unknown Source)
    at java.io.ObjectOutputStream.writeOrdinaryObject(Unknown Source)
    at java.io.ObjectOutputStream.writeObject0(Unknown Source)
    at java.io.ObjectOutputStream.writeObject(Unknown Source)
    at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:43)
    at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:100)
    at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:295)
    at org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:288)
    at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:108)
    at org.apache.spark.SparkContext.clean(SparkContext.scala:2037)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1896)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1911)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:893)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:358)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:892)
    at org.apache.spark.api.java.JavaRDDLike$class.collect(JavaRDDLike.scala:360)
    at org.apache.spark.api.java.AbstractJavaRDDLike.collect(JavaRDDLike.scala:45)
    at com.chatSparkConnactionTest.JavaDemo.main(JavaDemo.java:37)
Caused by: java.lang.ClassNotFoundException: org.apache.spark.sql.Dataset
    at java.net.URLClassLoader.findClass(Unknown Source)
    at java.lang.ClassLoader.loadClass(Unknown Source)
    at sun.misc.Launcher$AppClassLoader.loadClass(Unknown Source)
    at java.lang.ClassLoader.loadClass(Unknown Source)
    ... 58 more
I updated my pom.xml, but that didn't resolve the error. Can anyone help me solve this problem?
Thank you!
Update 1: here is a screenshot of my build path: Link to my screenshot
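(For context: the missing class, org.apache.spark.sql.Dataset, lives in the spark-sql artifact, which none of the dependencies in the pom above pull in, while the spark-cassandra-connector 2.0.x builds rely on it. A dependency along these lines, with the version assumed to match the Spark 2.0.0 / Scala 2.10 artifacts already used, would likely resolve the NoClassDefFoundError:

    <!-- spark-sql provides org.apache.spark.sql.Dataset, which
         spark-cassandra-connector 2.0.x needs at runtime.
         Version chosen to match the other Spark artifacts above. -->
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-sql_2.10</artifactId>
        <version>2.0.0</version>
    </dependency>
)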
Thank you very much! Everything works now (I added the dependencies as you described). –
Just a small question: which one is preferable if I need JSON as the output, the **SparkSession and Dataset API** or **RDDs**? –
I did some research; the Dataset API is definitely preferable in my case. –
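(To illustrate the Dataset route mentioned in the comments, a minimal sketch, assuming the spark-sql dependency above is in place; the class name and output path are made up for the example:

    package com.chatSparkConnactionTest;

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    // Hypothetical demo: reads the same chat.dictionary table through the
    // connector's Spark SQL data source and writes it out as JSON lines.
    public class DatasetJsonDemo {
        public static void main(String[] args) {
            SparkSession spark = SparkSession.builder()
                .appName("chat")
                .master("local")
                .config("spark.cassandra.connection.host", "127.0.0.1")
                .getOrCreate();

            // Load the Cassandra table as a Dataset<Row>.
            Dataset<Row> dictionary = spark.read()
                .format("org.apache.spark.sql.cassandra")
                .option("keyspace", "chat")
                .option("table", "dictionary")
                .load();

            // Each row becomes one JSON object per line under the given
            // directory (the path is an arbitrary example).
            dictionary.write().json("target/dictionary-json");

            spark.stop();
        }
    }
)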