
An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe

I am new to Spark and I'm running into an error while converting a .csv file to a DataFrame. I am using the pyspark_csv module for the conversion, but it fails. Here is the stack trace of the error; can anyone give me a suggestion for resolving it?

 ---------------------------------------------------------------------------
 Py4JJavaError                             Traceback (most recent call last)
 <ipython-input-16-67fe725a8e27> in <module>()
 ----> 1 data_df = pycsv.csvToDataFrame(sqlCtx, data_body, sep=",", columns=data_header.split('\t')).cache()

 /usr/spark-1.5.0/python/pyspark_csv.py in csvToDataFrame(sqlCtx, rdd, columns, sep, parseDate)
      51     rdd_sql = rdd_array.zipWithIndex().filter(
      52         lambda r_i: r_i[1] > 0).keys()
 ---> 53     column_types = evaluateType(rdd_sql, parseDate)
      54
      55     def toSqlRow(row):

 /usr/spark-1.5.0/python/pyspark_csv.py in evaluateType(rdd_sql, parseDate)
     177 def evaluateType(rdd_sql, parseDate):
     178     if parseDate:
 --> 179         return rdd_sql.map(getRowType).reduce(reduceTypes)
     180     else:
     181         return rdd_sql.map(getRowTypeNoDate).reduce(reduceTypes)

 /usr/spark-1.5.0/python/pyspark/rdd.py in reduce(self, f)
     797             yield reduce(f, iterator, initial)
     798
 --> 799         vals = self.mapPartitions(func).collect()
     800         if vals:
     801             return reduce(f, vals)

 /usr/spark-1.5.0/python/pyspark/rdd.py in collect(self)
     771         """
     772         with SCCallSiteSync(self.context) as css:
 --> 773             port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
     774         return list(_load_from_socket(port, self._jrdd_deserializer))
     775

 /usr/spark-1.5.0/python/lib/py4j-0.8.2.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
     536         answer = self.gateway_client.send_command(command)
     537         return_value = get_return_value(answer, self.gateway_client,
 --> 538             self.target_id, self.name)
     539
     540         for temp_arg in temp_args:

 /usr/spark-1.5.0/python/lib/py4j-0.8.2.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
     298             raise Py4JJavaError(
     299                 'An error occurred while calling {0}{1}{2}.\n'.
 --> 300                 format(target_id, '.', name), value)
     301         else:
     302             raise Py4JError(

 Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
 : org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 10.0 failed 1 times, most recent failure: Lost task 0.0 in stage 10.0 (TID 20, localhost): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
   File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 111, in main
     process()
   File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 106, in process
     serializer.dump_stream(func(split_index, iterator), outfile)
   File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
     vs = list(itertools.islice(iterator, batch))
   File "/usr/spark-1.5.0/python/pyspark/rdd.py", line 797, in func
     yield reduce(f, iterator, initial)
   File "/tmp/spark-d85b88bf-e4a4-46b8-8b51-eaf0f03e48ab/userFiles-40f9eb34-4efa-4ffb-aaf5-ebcb24a4ecb9/pyspark_csv.py", line 160, in reduceTypes
     b_type = b[col]
 IndexError: list index out of range

 	at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:138)
 	at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:179)
 	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:97)
 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
 	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
 	at org.apache.spark.scheduler.Task.run(Task.scala:88)
 	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
 	at java.lang.Thread.run(Thread.java:745)

 Driver stacktrace:
 	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1280)
 	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1268)
 	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1267)
 	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
 	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
 	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1267)
 	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
 	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
 	at scala.Option.foreach(Option.scala:236)
 	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:697)
 	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1493)
 	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1455)
 	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1444)
 	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
 	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:567)
 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1813)
 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1826)
 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1839)
 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1910)
 	at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:905)
 	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
 	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
 	at org.apache.spark.rdd.RDD.withScope(RDD.scala:306)
 	at org.apache.spark.rdd.RDD.collect(RDD.scala:904)
 	at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:373)
 	at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 	at java.lang.reflect.Method.invoke(Method.java:606)
 	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
 	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:379)
 	at py4j.Gateway.invoke(Gateway.java:259)
 	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
 	at py4j.commands.CallCommand.execute(CallCommand.java:79)
 	at py4j.GatewayConnection.run(GatewayConnection.java:207)
 	at java.lang.Thread.run(Thread.java:745)
 Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
   File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 111, in main
     process()
   File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 106, in process
     serializer.dump_stream(func(split_index, iterator), outfile)
   File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
     vs = list(itertools.islice(iterator, batch))
   File "/usr/spark-1.5.0/python/pyspark/rdd.py", line 797, in func
     yield reduce(f, iterator, initial)
   File "/tmp/spark-d85b88bf-e4a4-46b8-8b51-eaf0f03e48ab/userFiles-40f9eb34-4efa-4ffb-aaf5-ebcb24a4ecb9/pyspark_csv.py", line 160, in reduceTypes
     b_type = b[col]
 IndexError: list index out of range
 	at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:138)
 	at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:179)
 	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:97)
 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
 	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
 	at org.apache.spark.scheduler.Task.run(Task.scala:88)
 	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
 	... 1 more

Here is my code. The last statement is the one that throws this error while converting from CSV to a DataFrame:

 import findspark
 findspark.init()
 findspark.find()
 import pyspark

 sc = pyspark.SparkContext(appName="myAppName")
 sqlCtx = pyspark.SQLContext

 # csv to dataframe
 sc.addPyFile('/usr/spark-1.5.0/python/pyspark_csv.py')
 import pyspark_csv as pycsv

 def skip_header(idx, iterator):
     if idx == 0:
         next(iterator)
     return iterator

 data = sc.textFile('gdeltdata/20160427.CSV')
 data_header = data.first()
 data_body = data.mapPartitionsWithIndex(skip_header)
 data_df = pycsv.csvToDataFrame(sqlCtx, data_body, sep=",", columns=data_header.split('\t'))

I can't actually comment yet, but without any code I will have to guess: you are trying to reference an index that doesn't exist on a string that does exist. This is the same as doing the following:

 string = 'hello'
 new_char = string[6]

This tries to find the 7th letter of a 5-letter string, which raises the following error:

IndexError: string index out of range

Since I can't see the code that caused the error, this is all I can offer on your question.
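
In general, you can guard against this kind of error by checking the length before indexing. A minimal sketch (the index variable here is purely illustrative):

 string = 'hello'
 index = 6
 # Only index if the position actually exists; fall back to None otherwise
 new_char = string[index] if index < len(string) else None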

It looks like your CSV file is not properly formatted.

The actual error is further down:

  File "/tmp/spark-d85b88bf-e4a4-46b8-8b51-eaf0f03e48ab/userFiles-40f9eb34-4efa-4ffb-aaf5-ebcb24a4ecb9/pyspark_csv.py", line 160, in reduceTypes
    b_type = b[col]
IndexError: list index out of range

This suggests that one of the rows does not have the expected number of columns (it may be a blank line at the end of the file).
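
If that is the case, one workaround is to drop empty lines before the conversion. A minimal sketch building on the variables from the question (clean_body is an illustrative name):

 # Filter out blank lines (e.g. a trailing newline at the end of the file)
 clean_body = data_body.filter(lambda line: line.strip() != '')
 data_df = pycsv.csvToDataFrame(sqlCtx, clean_body, sep=",",
                                columns=data_header.split('\t'))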

