
How do I set a standalone Spark configuration in Java to run the MLlib Spark example locally?

I want to run the Spark MLlib examples locally on my PC (I believe that is standalone mode). Specifically, I want to run JavaWord2VecExample.java. The file's configuration creates a Spark session meant to run on workers behind a single master, but I only want to run this class on my own PC (locally). The original class source code is as follows:

package org.apache.spark.examples.ml;

// $example on$
import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.ml.feature.Word2Vec;
import org.apache.spark.ml.feature.Word2VecModel;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.*;
// $example off$

public class JavaWord2VecExample {
  public static void main(String[] args) {

    SparkSession spark = SparkSession
      .builder()
      .appName("JavaWord2VecExample")
      .getOrCreate();

    // $example on$
    // Input data: Each row is a bag of words from a sentence or document.
    List<Row> data = Arrays.asList(
      RowFactory.create(Arrays.asList("Hi I heard about Spark".split(" "))),
      RowFactory.create(Arrays.asList("I wish Java could use case classes".split(" "))),
      RowFactory.create(Arrays.asList("Logistic regression models are neat".split(" ")))
    );
    StructType schema = new StructType(new StructField[]{
      new StructField("text", new ArrayType(DataTypes.StringType, true), false, Metadata.empty())
    });
    Dataset<Row> documentDF = spark.createDataFrame(data, schema);

    // Learn a mapping from words to Vectors.
    Word2Vec word2Vec = new Word2Vec()
      .setInputCol("text")
      .setOutputCol("result")
      .setVectorSize(3)
      .setMinCount(0);

    Word2VecModel model = word2Vec.fit(documentDF);
    Dataset<Row> result = model.transform(documentDF);

    for (Row row : result.collectAsList()) {
      List<String> text = row.getList(0);
      Vector vector = (Vector) row.get(1);
      System.out.println("Text: " + text + " => \nVector: " + vector + "\n");
    }
    // $example off$
    spark.stop();
  }
}

As far as I know, if I want to run the example on my local PC, I should replace the SparkSession with a SparkConf. So I tried that, and my current source code is:

package org.apache.spark.examples.ml;

// $example on$
import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.ml.feature.Word2Vec;
import org.apache.spark.ml.feature.Word2VecModel;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.*;
// $example off$

public class JavaWord2VecExample {
  public static void main(String[] args) {


    SparkConf spark = new SparkConf()
            .setAppName("JavaWord2VecExample")
            .set("spark.storage.memoryFraction", "1")
            .setMaster("spark://master:7077");

    // $example on$
    // Input data: Each row is a bag of words from a sentence or document.
    List<Row> data = Arrays.asList(
      RowFactory.create(Arrays.asList("Hi I heard about Spark".split(" "))),
      RowFactory.create(Arrays.asList("I wish Java could use case classes".split(" "))),
      RowFactory.create(Arrays.asList("Logistic regression models are neat".split(" ")))
    );
    StructType schema = new StructType(new StructField[]{
      new StructField("text", new ArrayType(DataTypes.StringType, true), false, Metadata.empty())
    });
    Dataset<Row> documentDF = spark.createDataFrame(data, schema);

    // Learn a mapping from words to Vectors.
    Word2Vec word2Vec = new Word2Vec()
      .setInputCol("text")
      .setOutputCol("result")
      .setVectorSize(3)
      .setMinCount(0);

    Word2VecModel model = word2Vec.fit(documentDF);
    Dataset<Row> result = model.transform(documentDF);

    for (Row row : result.collectAsList()) {
      List<String> text = row.getList(0);
      Vector vector = (Vector) row.get(1);
      System.out.println("Text: " + text + " => \nVector: " + vector + "\n");
    }
    // $example off$
    spark.stop();
  }
}

As a result, I get some errors:

Error: java:cannot find symbol

for the methods createDataFrame() and stop().

I am new to Java and Spark. Please help me fix these errors. Thanks for your answers.
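
For reference, below is a minimal sketch of what I understand a purely local setup might look like, assuming the SparkSession builder accepts a "local[*]" master URL for running everything in-process (the class name JavaWord2VecLocalSketch is just my placeholder, and I kept only one input row for brevity). Is this the right direction?

package org.apache.spark.examples.ml;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.ml.feature.Word2Vec;
import org.apache.spark.ml.feature.Word2VecModel;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.*;

public class JavaWord2VecLocalSketch {
  public static void main(String[] args) {

    // "local[*]" runs Spark inside this JVM using all available cores,
    // so no separate master or worker processes are needed.
    SparkSession spark = SparkSession
      .builder()
      .appName("JavaWord2VecExample")
      .master("local[*]")
      .getOrCreate();

    // Input data: each row is a bag of words from a sentence.
    List<Row> data = Arrays.asList(
      RowFactory.create(Arrays.asList("Hi I heard about Spark".split(" ")))
    );
    StructType schema = new StructType(new StructField[]{
      new StructField("text", new ArrayType(DataTypes.StringType, true), false, Metadata.empty())
    });

    // createDataFrame() and stop() are methods of SparkSession, not SparkConf.
    Dataset<Row> documentDF = spark.createDataFrame(data, schema);

    // Learn a mapping from words to vectors.
    Word2Vec word2Vec = new Word2Vec()
      .setInputCol("text")
      .setOutputCol("result")
      .setVectorSize(3)
      .setMinCount(0);

    Word2VecModel model = word2Vec.fit(documentDF);
    Dataset<Row> result = model.transform(documentDF);
    result.show(false);

    spark.stop();
  }
}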


0 Answers