Spark scala: java.lang.ClassCastException: java.lang.Integer cannot be cast to scala.collection.Seq

In the following code:

import scala.collection.mutable.ArrayBuffer
import org.apache.spark.ml.linalg.SparseVector // assumed spark.ml here; could equally be mllib
import org.apache.spark.sql.Row

def mapAppsToSparseVector(appFeatures: List[String], row: Row): SparseVector = {
    val vectorSize = appFeatures.length
    val indices = new ArrayBuffer[Int]()
    val values = new ArrayBuffer[Double]()
    val apps = row.getList[Tuple4[Int, String, String, String]](0).get(0)._4
    apps.split(":").foreach(m => if(appFeatures.indexOf(m) != -1) {indices += appFeatures.indexOf(m); values += 1.0})
    new SparseVector(vectorSize, indices.toArray, values.toArray)
}

val marketsToAdd = List("m1", "m3", "m5")
val columns = Array("id", "category", "color", "markets")
val df3 = spark.sqlContext.createDataFrame(
  Seq((0, "apples", "red", "m0:m1:m2"),
      (1, "oranges", "orange", "m0:m3"),
      (2, "bananas", "yellow", "m4:m5"),
      (3, "apples", "red", "m0"),
      (4, "bananas", "yellow", "m6:m7"),
      (5, "oranges", "orange", "m5:m7"),
      (6, "oranges", "orange", "m7:m0")
    )).toDF(columns: _*)
import spark.implicits._
val df5 = df3.map(r => (r.getInt(0), mapAppsToSparseVector(marketsToAdd, r))).toDF("id", "features")
df5.printSchema

So far so good, and the schema looks like:

root
 |-- id: integer (nullable = false)
 |-- features: vector (nullable = true)

As expected. However, when I try df5.show(), I get the error below. I'm fairly new to Spark; I've tried a few things, such as adding an implicit function on the map with Seq, but I still hit the same error. Does anyone know what's going on?

org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 64.0 failed 1 times, most recent failure: Lost task 0.0 in stage 64.0 (TID 176, localhost, executor driver): java.lang.ClassCastException: java.lang.Integer cannot be cast to scala.collection.Seq
    at org.apache.spark.sql.Row$class.getSeq(Row.scala:283)
    at org.apache.spark.sql.catalyst.expressions.GenericRow.getSeq(rows.scala:166)
    at org.apache.spark.sql.Row$class.getList(Row.scala:291)
    at org.apache.spark.sql.catalyst.expressions.GenericRow.getList(rows.scala:166)
    at mapAppsToSparseVector(<console>:36)
    at $anonfun$1.apply(<console>:82)
    at $anonfun$1.apply(<console>:82)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.mapelements_doConsume_0$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.deserializetoobject_doConsume_0$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
  at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1889)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1876)
  at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
  at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
  at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1876)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
  at scala.Option.foreach(Option.scala:257)
  at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2110)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2059)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2048)
  at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
  at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
  at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:365)
  at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3389)
  at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2550)
  at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2550)
  at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3370)
  at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
  at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
  at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3369)
  at org.apache.spark.sql.Dataset.head(Dataset.scala:2550)
  at org.apache.spark.sql.Dataset.take(Dataset.scala:2764)
  at org.apache.spark.sql.Dataset.getRows(Dataset.scala:254)
  at org.apache.spark.sql.Dataset.showString(Dataset.scala:291)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:751)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:710)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:719)
  ... 46 elided
Caused by: java.lang.ClassCastException: java.lang.Integer cannot be cast to scala.collection.Seq
  at org.apache.spark.sql.Row$class.getSeq(Row.scala:283)
  at org.apache.spark.sql.catalyst.expressions.GenericRow.getSeq(rows.scala:166)
  at org.apache.spark.sql.Row$class.getList(Row.scala:291)
  at org.apache.spark.sql.catalyst.expressions.GenericRow.getList(rows.scala:166)
  at mapAppsToSparseVector(<console>:36)
  at $anonfun$1.apply(<console>:82)
  at $anonfun$1.apply(<console>:82)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.mapelements_doConsume_0$(Unknown Source)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.deserializetoobject_doConsume_0$(Unknown Source)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
  at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
  at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
  at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
  at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
  at org.apache.spark.scheduler.Task.run(Task.scala:123)
  at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  ... 1 more

I don't usually work with Row directly, but it looks like the following line is the problem:

val apps = row.getList[Tuple4[Int, String, String, String]](0).get(0)._4

You are trying to read field 0 as a java.util.List[Tuple4[Int, String, String, String]], but field 0 of the DataFrame is actually an Int. I suspect you imagined the Row.get&lt;Type&gt; methods (getInt, getString, getList, ...) to be more sophisticated than they actually are: they simply cast the underlying value to the requested type, which is exactly where the ClassCastException in your stack trace comes from.
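
You can confirm this by printing the input DataFrame's schema (the output shown here is inferred from the Seq of tuples used to build df3):

df3.printSchema()
// root
//  |-- id: integer (nullable = false)
//  |-- category: string (nullable = true)
//  |-- color: string (nullable = true)
//  |-- markets: string (nullable = true)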

Either of the following should fix the problem (assuming I know what I'm talking about):

// by field name
row.getString(row.fieldIndex("markets"))
// by index ("markets" is the 4th column, so index 3)
row.getString(3)

That said, as others have commented, implementing your function as a UDF would make this easier to work with.
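
For illustration, here is a minimal sketch of that UDF approach (the name marketsToVec is made up here; it assumes Spark 2.x, org.apache.spark.ml.linalg, and the marketsToAdd list from the question):

import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.functions.{col, udf}

// Hypothetical UDF: Spark hands the "markets" column to the closure
// directly as a String, so no Row accessors are needed.
val marketsToVec = udf { markets: String =>
  val elems = markets.split(":")
    .map(marketsToAdd.indexOf(_)) // position in the feature list, or -1
    .filter(_ >= 0)
    .distinct
    .map(i => (i, 1.0))
    .toSeq
  Vectors.sparse(marketsToAdd.length, elems)
}

val df5 = df3.select(col("id"), marketsToVec(col("markets")).as("features"))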

I'm not sure of the exact use case, but if you want to use the code above, modify it as follows:

import scala.collection.mutable.ArrayBuffer
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.sql.Row

def mapAppsToSparseVector(appFeatures: List[String], row: Row): SparseVector = {
  val vectorSize = appFeatures.length
  val indices = new ArrayBuffer[Int]()
  val values = new ArrayBuffer[Double]()
//  val apps = row.getList[Tuple4[Int, String, String, String]](0).get(0)._4
  // read the "markets" column (index 3) as a plain String
  row.toSeq(3).asInstanceOf[String].split(":")
    .foreach(m => if (appFeatures.indexOf(m) != -1) {
      indices += appFeatures.indexOf(m)
      values += 1.0
    })
  new SparseVector(vectorSize, indices.toArray, values.toArray)
}

val marketsToAdd = List("m1", "m3", "m5")
val columns = Array("id", "category", "color", "markets")
val df3 = sqlContext.createDataFrame(
  Seq((0, "apples", "red", "m0:m1:m2"),
      (1, "oranges", "orange", "m0:m3"),
      (2, "bananas", "yellow", "m4:m5"),
      (3, "apples", "red", "m0"),
      (4, "bananas", "yellow", "m6:m7"),
      (5, "oranges", "orange", "m5:m7"),
      (6, "oranges", "orange", "m7:m0")
  )).toDF(columns: _*)
val implicits = sqlContext.sparkSession.implicits
import implicits._
val df5 = df3.map(r => (r.getInt(0), mapAppsToSparseVector(marketsToAdd, r))).toDF("id", "features")
df5.printSchema
df5.show(false)
// root
//  |-- id: integer (nullable = false)
//  |-- features: vector (nullable = true)
//
// +---+-------------+
// |id |features     |
// +---+-------------+
// |0  |(3,[0],[1.0])|
// |1  |(3,[1],[1.0])|
// |2  |(3,[2],[1.0])|
// |3  |(3,[],[])    |
// |4  |(3,[],[])    |
// |5  |(3,[2],[1.0])|
// |6  |(3,[],[])    |
// +---+-------------+
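
One caveat with this version: row.toSeq(3).asInstanceOf[String] depends on the column position, so row.getString(row.fieldIndex("markets")) from the answer above is the safer choice if the column order ever changes.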
