
Extend org.apache.spark.sql.Row functionality : Spark Scala

The Spark Row trait defines the following method:

 /** Returns true if there are any NULL values in this row. */
  def anyNull: Boolean = {
    val len = length
    var i = 0
    while (i < len) {
      if (isNullAt(i)) { return true }
      i += 1
    }
    false
  }

which can be used to check whether any value in the row is null. Similarly, I want to check whether any value
1) is an empty string
2) equals a string that I pass in

Is there any way to extend the functionality of org.apache.spark.sql.Row?
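Row is a trait owned by Spark, so it cannot be modified in place; as the answers below show, the idiomatic route is a Scala implicit extension class. A minimal sketch, using a hypothetical RichRow wrapper:

import org.apache.spark.sql.Row

// Sketch only: the implicit-class ("extension method") pattern adds
// methods to Row without touching Spark's source.
implicit class RichRow(row: Row) {
  // true if any column holds an empty string
  def anyEmptyString: Boolean =
    (0 until row.length).exists(i => row.get(i) == "")
}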

Following the idiomatic style used by the Row trait's anyNull method, here is a method that checks whether any StringType element of a Row equals an input string:

import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

def checkStringsInRow(row: Row, s: String): Boolean = {
  val stringFields = row.schema.zipWithIndex.
    collect{ case (field, idx) if field.dataType == StringType => idx }

  var i = 0
  while (i < stringFields.size) {
    if (row.getAs[String](stringFields(i)) == s) return true
    i += 1
  }
  false
}

Testing the method:

val df = Seq(
  (1, "a", 10L, "xx"),
  (2, "b", 10L, ""),
  (3, null, 10L, "zz")
).toDF("c1", "c2", "c3", "c4")

df.rdd.map(checkStringsInRow(_, "a")).collect
// res1: Array[Boolean] = Array(true, false, false)

df.rdd.map(checkStringsInRow(_, "")).collect
// res2: Array[Boolean] = Array(false, true, false)

df.rdd.map(checkStringsInRow(_, null)).collect
// res3: Array[Boolean] = Array(false, false, true)
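Note that the null case in res3 works because getAs[String] returns null for a null field, and null == null is true in Scala; only the row whose c2 is null is flagged.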

If you would rather call it like a built-in Row method, you can define it inside an implicit class, like this:

import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

implicit class RowOps(row: Row) {
  def checkStringsInRow(s: String): Boolean = {
    val stringFields = row.schema.zipWithIndex.
      collect{ case (field, idx) if field.dataType == StringType => idx }

    var i = 0
    while (i < stringFields.size) {
      if (row.getAs[String](stringFields(i)) == s) return true
      i += 1
    }
    false
  }
}

df.rdd.map(_.checkStringsInRow("a")).collect
// res4: Array[Boolean] = Array(true, false, false)
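The same extension method can also be used in DataFrame operations directly, since the rows produced there carry their schema; a brief, hedged example:

df.filter(r => r.checkStringsInRow("")).show
// expected to keep only the row whose c4 is the empty string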

Is there any way to extend the functionality of org.apache.spark.sql.Row?

You can use a Scala implicit class for this, e.g. a RowExt class:

package utility

import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.Row

import scala.util.{Failure, Success, Try}

object MyRowExtensions {
  /**
    * RowExt.
    * @param row Row
    */
  implicit class RowExt(row: Row) {
    /**
      * Returns true if any non-null value in the row has an empty string representation.
      * @return Boolean
      */
    def anyEmptyString(): Boolean = {
      val len = row.length
      var i = 0
      while (i < len) {
        if (row.get(i) != null && StringUtils.isEmpty(row.get(i).toString)) {
          return true
        }
        i += 1
      }
      false
    }

    /**
      * Returns true if any non-null value's string representation equals matchStr.
      * @param matchStr String
      * @return Boolean
      */
    def anyStringEqual(matchStr: String): Boolean = {
      val len = row.length
      var i = 0
      while (i < len) {
        if (row.get(i) != null && row.get(i).toString == matchStr) {
          return true
        }
        i += 1
      }
      false
    }

    /**
      * Returns true if any non-null value's string representation contains
      * any of the characters in matchStr (StringUtils.containsAny semantics).
      * @param matchStr String
      * @return Boolean
      */
    def anyStringMatched(matchStr: String): Boolean = {
      val len = row.length
      var i = 0
      while (i < len) {
        if (row.get(i) != null && StringUtils.containsAny(row.get(i).toString, matchStr)) {
          return true
        }
        i += 1
      }
      false
    }

    /**
      * Safe variant of getAs: returns None instead of throwing when
      * fieldName is not present in the row's schema.
      * @param fieldName String
      * @tparam T expected field type
      * @return Option[T]
      */
    def getAsOption[T](fieldName: String): Option[T] = {
      Try(row.getAs[T](fieldName)) match {
        case Success(value) => Some(value)
        case Failure(e) => None
      }
    }
  }
}

The call site then looks like this:

import utility.MyRowExtensions.RowExt

myDataFrame.map(r => r.anyStringMatched("myteststring")).show
myDataFrame.filter(_.anyEmptyString).show
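The getAsOption helper is not exercised above. A minimal sketch of its behaviour, assuming a hand-built row (GenericRowWithSchema is Spark's concrete Row that carries a schema):

import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.sql.types._
import utility.MyRowExtensions.RowExt

val schema = StructType(Seq(
  StructField("name", StringType),
  StructField("age", IntegerType)))
val row: Row = new GenericRowWithSchema(Array("alice", 30), schema)

row.getAsOption[String]("name")   // Some("alice")
row.getAsOption[Int]("missing")   // None: fieldIndex throws, Try catches it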

My attempt, keeping it simple:

Define the function:

scala> import org.apache.spark.sql.Row
import org.apache.spark.sql.Row

scala> def anyNull(itm: Row, search: String): Boolean = itm.toSeq.contains(search)
anyNull: (itm: org.apache.spark.sql.Row, search: String)Boolean
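(Despite its name, this anyNull checks whether any column equals search, not whether any column is null.)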

Usage with RDD[Row]:

scala> val rdd1 = sc.parallelize(Seq(Row("1","a","A"),Row("2","b", "B"),Row("3","c","C")))
rdd1: org.apache.spark.rdd.RDD[org.apache.spark.sql.Row] = ParallelCollectionRDD[130] at parallelize at <console>:25

scala> rdd1.foreach(println)
[1,a,A]
[2,b,B]
[3,c,C]

scala> rdd1.map(r => (r, anyNull(r,"b"))).foreach(println)
([2,b,B],true)
([3,c,C],false)
([1,a,A],false)

scala> rdd1.map(r => (r, anyNull(r,"a"))).foreach(println)
([1,a,A],true)
([3,c,C],false)
([2,b,B],false)

scala> rdd1.map(r => (r, anyNull(r,""))).foreach(println)
([3,c,C],false)
([2,b,B],false)
([1,a,A],false)

Usage with a DataFrame:

scala> val df = sc.parallelize(Seq(("1","a","A"),("2","b", "B"),("3","c","C"))).toDF("num", "smallcase", "uppercase")
df: org.apache.spark.sql.DataFrame = [num: string, smallcase: string ... 1 more field]

scala> df.show()
+---+---------+---------+
|num|smallcase|uppercase|
+---+---------+---------+
|  1|        a|        A|
|  2|        b|        B|
|  3|        c|        C|
+---+---------+---------+


scala> df.rdd.map(r => Row(r(0), r(1), r(2))).map(r => (r, anyNull(r,"b"))).foreach(println)
([2,b,B],true)
([1,a,A],false)
([3,c,C],false)

scala> df.rdd.map(r => Row(r(0), r(1), r(2))).map(r => (r, anyNull(r,""))).foreach(println)
([2,b,B],false)
([1,a,A],false)
([3,c,C],false)
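Side note: df.rdd already returns an RDD[Row], so rebuilding each row with Row(r(0), r(1), r(2)) is unnecessary; the shorter form behaves the same:

scala> df.rdd.map(r => (r, anyNull(r, "b"))).foreach(println)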
