![](/img/trans.png)
[英]Uploading File from HDFS to S3 using FileUtil.copy in Spark causing DiskErrorException: Directory is not writable error?
[英]Copy from S3 TO HDFS Using Spark
將文件從S3寫入HDFS時出現ConnectionTimeOutException
。
嘗試添加超時參數:
import java.io.IOException
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.FileUtil
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.SparkSession
import scala.collection.JavaConversions._
val sparkSession:SparkSession=SparkSession.builder().master("yarn").appName("To hdfs").getOrCreate
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.awsAccessKeyId", "XXXXXXXXXXXXXXXXXxx")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.attempts.maximum", "30")
sparkSession.sparkContext.hadoopConfiguration.set("spark.speculation", "false")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.proxy.host","webproxy.e.corp.services")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.proxy.port","80")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.connection.ssl.enabled","true")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.endpoint","s3.us-east-1.amazonaws.com")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.awsSecretAccessKey", "XXXXXXXXXXXXXXXXXx")
sparkSession.sparkContext.hadoopConfiguration.set("mapreduce.fileoutputcommitter.algorithm.version", "2")
sparkSession.sparkContext.hadoopConfiguration.set("spark.yarn.queue","root.ecpdevingest")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.connection.establish.timeout","10000")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.connection.timeout","50000")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.threads.max","100")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.threads.core","5")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.multipart.size","104857600")
sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.multipart.threshold","2147483647")
sparkSession.sparkContext.hadoopConfiguration.set("spark.executor.instances", "8")
sparkSession.sparkContext.hadoopConfiguration.set("spark.executor.cores", "4")
sparkSession.sparkContext.hadoopConfiguration.set("spark.executor.memory", "32g")
sparkSession.sparkContext.hadoopConfiguration.set("spark.driver.memory", "4g")
sparkSession.sparkContext.hadoopConfiguration.set("spark.driver.cores", "2")
val file = sparkSession.sparkContext.textFile("s3a://acrXXXXXXXXXXXXXXXXX5.avro", 11)
file.saveAsObjectFile("hdfs://c411apy.int.westgroup.com:8020/project/ecpdevingest/avro/100")
錯誤是:
org.apache.http.conn.ConnectTimeoutException: Connect to acron-avro-bucket.s3.amazonaws.com:443 timed out
at org.apache.http.conn.ssl.SSLSocketFactory.connectSocket(SSLSocketFactory.java:416)
at org.apache.http.impl.conn.DefaultClientConnectionOperator.openConnection(DefaultClientConnectionOperator.java:180)
at org.apache.http.impl.conn.AbstractPoolEntry.open(AbstractPoolEntry.java:151)
at org.apache.http.impl.conn.AbstractPooledConnAdapter.open(AbstractPooledConnAdapter.java:125)
at org.apache.http.impl.client.DefaultRequestDirector.tryConnect(DefaultRequestDirector.java:643)
at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.java:479)
at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:906)
at org.apache.http.impl.client.AbstractHttpClient.execute(AbstractHttpClient.java:805)
at org.jets3t.service.impl.rest.httpclient.RestStorageService.performRequest(RestStorageService.java:334)
at org.jets3t.service.impl.rest.httpclient.RestStorageService.performRequest(RestStorageService.java:281)
at org.jets3t.service.impl.rest.httpclient.RestStorageService.performRestHead(RestStorageService.java:942)
at org.jets3t.service.impl.rest.httpclient.RestStorageService.getObjectImpl(RestStora
s3a的證書密鑰與s3和s3n的使用密鑰不同:
spark.sparkContext.hadoopConfiguration.set("fs.s3a.secret.key", "XXX")
spark.sparkContext.hadoopConfiguration.set("fs.s3a.access.key", "XXX")
org.apache.hadoop.fs.s3native.NativeS3FileSystem
。 您已完成此操作,因此不會讀取和使用fs.s3a選項中的任何一個。 刪除該聲明,然后重試。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.