
Scala: SPARK UDAF for polynomial curve fitting in Scala, got "type (char[]) cannot be converted to the string type" error

I am trying to do a polynomial curve fit on a Spark dataframe similar to the one below (using Spark version 2.4.0.7.1.5, Scala version 2.11.12 (OpenJDK 64-Bit Server VM, Java 1.8.0_232)).

I wrote a UDAF for this. It registers, but I get an error when it runs.

I am new to Scala and UDAFs. Could you help me see what is wrong with my function?

Thanks,

Sample df

val n = 2

val data = Seq(
  (1,80.0,-0.361982467), (1,70.0,0.067847447),  (1,50.0,-0.196768255), 
  (1,40.0,-0.135489192), (1,65.0,0.005993648),  (1,75.0,0.037561161), 
  (1,60.0,-0.212658599), (1,55.0,-0.187080872), (1,85.0, 0.382061571),
  (2,80.0,-0.301982467), (2,70.0,0.097847447),  (2,50.0,-0.186768255), 
  (2,40.0,-0.105489192), (2,65.0,0.007993648),  (2,75.0,0.037561161), 
  (2,60.0,-0.226528599), (2,55.0,-0.170870872), (2,85.0, 0.320615718)
)

val df = data.toDF("id", "x","y")

UDAF code

import org.apache.spark.sql.functions.{lit, udf}
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types._
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
// Curve-fitting classes come from Apache Commons Math 3
import org.apache.commons.math3.fitting.{PolynomialCurveFitter, WeightedObservedPoints}


class Fitter extends UserDefinedAggregateFunction {
  /**
   * Polynomial curve fitting
   *   y = c + a1*x + a2*x^2 + ...+ an * x^n
   * parameters:
   *    x: Array[Double]
   *    y: Array[Double]
   *    n: Int, polynomial degree
   * Return:
   *  coeff: the fitted parameters [c, a1, a2,...,an]
   */

  private def polyCurveFitting = (x: Array[Double], y: Array[Double], n: Int) => {

    val obs = new WeightedObservedPoints()

    for (i <- 0 until x.size) {
      obs.add(x(i), y(i))
    }

    // Instantiate a polynomial fitter of degree n.
    val fitter = PolynomialCurveFitter.create(n)

    // Retrieve fitted parameters (coefficients of the polynomial function).
    val coeff = fitter.fit(obs.toList())

    coeff.mkString("|")
  }

  override def inputSchema: StructType =
    new StructType().add(StructField("x", DoubleType))
                    .add(StructField("y", DoubleType))
                    .add(StructField("n", IntegerType))

  override def bufferSchema: StructType =
    new StructType().add(StructField("x_", ArrayType(DoubleType, false)))
                    .add(StructField("y_", ArrayType(DoubleType, false)))
                    .add(StructField("n_", IntegerType))

  override def dataType: DataType = StringType

  override def deterministic: Boolean = true

  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer.update(0, Array[Double]())
    buffer.update(1, Array[Double]())
    buffer.update(2, 0)
  }

  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if(!input.isNullAt(0)) {
      buffer(0) = buffer.getSeq[Double](0).toArray :+ input.getAs[Double](0)
      buffer(1) = buffer.getSeq[Double](1).toArray :+ input.getAs[Double](1)
      buffer(2) = input.getAs[Int](2)
    }
  }

  def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getSeq[Double](0).toArray ++ buffer2.getSeq[Double](0)
    buffer1(1) = buffer1.getSeq[Double](1).toArray ++ buffer2.getSeq[Double](1)
    buffer1(2) = buffer2.getAs[Int](2)
  }

def evaluate(buffer: Row): Array[Char] =
   polyCurveFitting(buffer.getSeq[Double](0).toArray,
                    buffer.getSeq[Double](1).toArray,
                    buffer.getAs[Int](2)).toArray
}


Calling the function

val fitter_test = new Fitter()

spark.udf.register("fitter", fitter_test)

df.createOrReplaceTempView("test")

spark.sql("select fitter(x,y,2) from test group by id").show()

val df_poly = df.groupBy("id").agg(fitter($"x",$"y",lit(n)).as("estimated_parameters"))

df_poly.show()

Expected output (pseudo):

+---+-----------------------------------------------------------------+
| id|                                             estimated_parameters|
+---+-----------------------------------------------------------------+
|  1|"0.5034579587428405|-0.026916449551428016|2.6802822386554184E-4" |
|  2|"0.5344951514280016|-0.020286916457958744|2.6916469164575874E-4" |
+---+-----------------------------------------------------------------+

Error message:

WARN scheduler.TaskSetManager: Lost task 18.0 in stage 7.0 (TID 27, -----.analytics.loc, executor 19): java.lang.IllegalArgumentException: The value ([C@52a57e78) of the type (char[]) cannot be converted to the string type
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:290)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:285)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:103)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:396)
    at org.apache.spark.sql.execution.aggregate.ScalaUDAF.eval(udaf.scala:444)
    at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$generateResultProjection$1.apply(AggregationIterator.scala:232)
    at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$generateResultProjection$1.apply(AggregationIterator.scala:224)
    at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:150)
    at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:29)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:266)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:257)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:858)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:858)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1289)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
ERROR scheduler.TaskSetManager: Task 18 in stage 7.0 failed 4 times; aborting job
org.apache.spark.SparkException: Job aborted due to stage failure: Task 18 in stage 7.0 failed 4 times, most recent failure: Lost task 18.3 in stage 7.0 (TID 52, --------.analytics.loc, executor 19): java.lang.IllegalArgumentException: The value ([C@4f761fc2) of the type (char[]) cannot be converted to the string type
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:290)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:285)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:103)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:396)
    at org.apache.spark.sql.execution.aggregate.ScalaUDAF.eval(udaf.scala:444)
    at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$generateResultProjection$1.apply(AggregationIterator.scala:232)
    at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$generateResultProjection$1.apply(AggregationIterator.scala:224)
    at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:150)
    at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:29)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:266)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:257)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:858)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:858)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1289)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)

Caused by: java.lang.IllegalArgumentException: The value ([C@4f761fc2) of the type (char[]) cannot be converted to the string type
  at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:290)
  at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:285)
  at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:103)
  at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:396)
  at org.apache.spark.sql.execution.aggregate.ScalaUDAF.eval(udaf.scala:444)
  at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$generateResultProjection$1.apply(AggregationIterator.scala:232)
  at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$generateResultProjection$1.apply(AggregationIterator.scala:224)
  at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:150)
  at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:29)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:266)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:257)
  at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:858)
  at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:858)
  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
  at org.apache.spark.scheduler.Task.run(Task.scala:123)
  at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1289)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  at java.lang.Thread.run(Thread.java:748)

I think the problem is related to the return type of the evaluate method. Spark expects a String, as you declared in the dataType method, so it detects that the returned type does not match. If you remove the .toArray and return a String from the evaluate method, the error should go away.
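A minimal sketch of the corrected evaluate, assuming the rest of the posted Fitter class stays unchanged (polyCurveFitting already ends with mkString("|"), so it already produces the String that StringType expects):

override def evaluate(buffer: Row): String =
  // polyCurveFitting returns the "|"-joined coefficient String, which matches
  // dataType (StringType); dropping .toArray avoids the char[] conversion error.
  polyCurveFitting(buffer.getSeq[Double](0).toArray,
                   buffer.getSeq[Double](1).toArray,
                   buffer.getAs[Int](2))

If you would rather hand back the coefficients as numbers, you could instead change dataType to ArrayType(DoubleType, false) and have the helper return the raw Array[Double] (drop the mkString), but the change above is the smallest fix to the posted code.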
