Python Thread Pool Faster than Go Routines when Scanning AWS S3?

I've recently been taking a deeper look into Golang concurrency, especially channels and worker pools. I wanted to compare performance between Go and Python (as many have done) because I have mostly read that Go outperforms Python with regard to concurrency. So I wrote two programs to scan the S3 buckets of an AWS account and report the total size. I ran this against an account with more than 75 buckets totaling a few TB of data.

I was surprised to find that my Python implementation was nearly 2x faster than my Go implementation. Based on all the benchmarks and literature I've read, this confused me and led me to believe I hadn't implemented my Go code correctly. While watching both programs run, I noticed that the Go implementation used at most 15% of my CPU while Python used more than 85%. Am I missing an important step with Go, or is there something missing in my implementation? Thanks in advance!

Python code:

'''
Get the size of all objects in all buckets in S3
'''
import os
import sys
import boto3
import concurrent.futures

def get_s3_bucket_sizes(aws_access_key_id, aws_secret_access_key, aws_session_token=None):

    # NOTE: the credential arguments are unused here; boto3 picks the
    # credentials up from the environment on its own
    s3client = boto3.client('s3')

    total_size = 0.0

    #
    # Start gathering data...
    #

    # Get all of the buckets in the account
    _buckets = s3client.list_buckets()

    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
        future_bucket_to_scan = {executor.submit(get_bucket_objects, s3client, bucket): bucket for bucket in _buckets["Buckets"]}

        for future in concurrent.futures.as_completed(future_bucket_to_scan):
            bucket_object = future_bucket_to_scan[future]

            try:
                ret = future.result()
            except Exception as exc:
                print('ERROR: %s' % (str(exc)))
            else:
                total_size += ret

    print(total_size)

def get_bucket_objects(s3client, bucket):

    name = bucket["Name"]

    # List the objects in the bucket.  Note that a single list_objects
    # call returns at most one page of results (up to 1,000 keys).
    lsbuckets = s3client.list_objects(Bucket=name)

    size = 0
    for content in lsbuckets.get("Contents", []):
        size += content["Size"]

    return size

#
# Main
#
if __name__ == '__main__':
    get_s3_bucket_sizes(os.environ.get("AWS_ACCESS_KEY_ID"), os.environ.get("AWS_SECRET_ACCESS_KEY"))

Go code:

package main

import (
    "fmt"
    "sync"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

type S3_Bucket_Response struct {
    bucket string
    count  int64
    size   int64
    err    error
}

type S3_Bucket_Request struct {
    bucket string
    region string
}

func get_bucket_objects_async(wg *sync.WaitGroup, requests chan S3_Bucket_Request, responses chan S3_Bucket_Response) {

    var size  int64
    var count int64

    for request := range requests {
        bucket := request.bucket
        region := request.region

        // Create a new response
        response := new(S3_Bucket_Response)
        response.bucket = bucket

        sess, err := session.NewSession(&aws.Config{
            Region: aws.String(region),
        })
        if err != nil {
            response.err = fmt.Errorf("Creating session failed: %s", err)
            responses <- *response
            continue
        }

        s3conn := s3.New(sess)

        resp, err := s3conn.ListObjectsV2(&s3.ListObjectsV2Input{
            Bucket: aws.String(bucket),
        })

        if err != nil {
            if awsErr, ok := err.(awserr.Error); ok {

                switch awsErr.Code() {
                case "NoSuchBucket":
                    response.err = fmt.Errorf("Bucket (%s): NoSuchBucket.  It must be in the process of being deleted.", bucket)
                case "AccessDenied":
                    response.err = fmt.Errorf("Bucket (%s): AccessDenied.  You should really be running this with full Admin privileges", bucket)
                }
            } else {
                response.err = fmt.Errorf("Listing Objects Unhandled Error: %s ", err)
            }

            responses <- *response
            continue
        } 

        contents := resp.Contents
        size      = 0
        count     = 0

        for _, object := range contents {
            size  += *object.Size
            count++
        }

        response.size  = size
        response.count = count

        responses <- *response
    }

    wg.Done()
}

func main() {

    var err  error
    var size int64
    var resp *s3.ListBucketsOutput
    var wg sync.WaitGroup

    sess, _ := session.NewSession()
    s3conn  := s3.New(sess)

    // Get account bucket listing
    if resp, err = s3conn.ListBuckets(&s3.ListBucketsInput{}); err != nil {
        fmt.Printf("Error listing buckets: %s\n", err)
        return
    }

    buckets := resp.Buckets
    size = 0

    // Create the buffered channels
    requests  := make(chan S3_Bucket_Request , len(buckets))
    responses := make(chan S3_Bucket_Response, len(buckets))

    for i := range buckets {

        bucket := *buckets[i].Name

        resp2, err := s3conn.GetBucketLocation(&s3.GetBucketLocationInput{
            Bucket: aws.String(bucket),
        })

        if err != nil {
            fmt.Printf("Could not get bucket location for bucket (%s): %s", bucket, err)
            continue
        }

        wg.Add(1)
        go get_bucket_objects_async(&wg, requests, responses)

        region := "us-east-1"
        if resp2.LocationConstraint != nil {
            region = *resp2.LocationConstraint
        }

        request := new(S3_Bucket_Request)
        request.bucket = bucket
        request.region = region

        requests <- *request        
    }

    // Close requests channel and wait for responses
    close(requests)
    wg.Wait()
    close(responses)

    cnt := 1
    // Process the results as they come in
    for response := range responses {

        fmt.Printf("Bucket: (%s) complete!  Buckets remaining: %d\n", response.bucket, len(buckets)-cnt)

        // Did the bucket request have errors?
        if response.err != nil {
            fmt.Println(response.err)
            continue
        }

        cnt  += 1
        size += response.size
    }

    fmt.Println(size)
    return 
}


Sorry, I haven't had a chance to review this thoroughly, but my answer would be: the solutions do not appear to be equivalent in terms of concurrency. Three things pop out:

  • Thread safety of the boto S3 client: is it thread safe? Can you confirm that? This reddit post suggests it is not.
  • Python uses a worker pool of size 50, but the Go version is unbounded. (A semaphore could be used to cap the current code at 50; see the sketch after this list.)
  • I'm not overly familiar with boto, but compared to Python, Go appears to perform an extra IO call per bucket on the main goroutine (GetBucketLocation).
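
A minimal sketch of that semaphore idea, using a buffered channel to cap in-flight goroutines at 50 (scanBucket and the bucket list here are hypothetical stand-ins for the per-bucket work in the original code):

package main

import "sync"

const maxWorkers = 50 // mirrors Python's ThreadPoolExecutor(max_workers=50)

// scanBucket stands in for the per-bucket ListObjectsV2 + size-summing work.
func scanBucket(name string) { _ = name }

func main() {
    sem := make(chan struct{}, maxWorkers) // counting semaphore
    var wg sync.WaitGroup

    buckets := []string{ /* names from ListBuckets */ }

    for _, bucket := range buckets {
        wg.Add(1)
        sem <- struct{}{} // blocks once maxWorkers goroutines are in flight
        go func(name string) {
            defer wg.Done()
            defer func() { <-sem }() // release the slot
            scanBucket(name)
        }(bucket)
    }
    wg.Wait()
}

An equivalent alternative is to keep the request channel from the original code and simply start exactly 50 worker goroutines instead of one per bucket.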

My next questions would be:

  • Is each solution correct? Can you prove it? (Do both sum to the same number of bytes, and does that match the S3 console?)
  • Are you sure the concurrency structure is the same, i.e. summing on the main thread, the same pool size, and the same amount of IO work per worker?
  • Are the clients' defaults the same? E.g. does Python have a default connection pool size? Go does not, so it creates new connections for every request (I ran into this just last week); see the sketch after this list.
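
On that last point, a minimal sketch of what connection reuse could look like with aws-sdk-go, assuming the defaults are indeed the bottleneck: net/http's default Transport keeps only 2 idle connections per host, so with 50+ concurrent S3 calls most requests re-dial (new TCP + TLS handshakes). The pool sizes below are illustrative, and the single shared client should be handed to all workers rather than calling session.NewSession inside the loop as the original code does:

package main

import (
    "net/http"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

// newS3Client builds one S3 client backed by a larger connection pool.
func newS3Client(region string) (*s3.S3, error) {
    transport := &http.Transport{
        MaxIdleConns:        100,
        MaxIdleConnsPerHost: 100, // net/http's default is 2
        IdleConnTimeout:     90 * time.Second,
    }
    sess, err := session.NewSession(&aws.Config{
        Region:     aws.String(region),
        HTTPClient: &http.Client{Transport: transport},
    })
    if err != nil {
        return nil, err
    }
    return s3.New(sess), nil
}

func main() {
    client, err := newS3Client("us-east-1")
    if err != nil {
        panic(err)
    }
    _ = client // hand this one client to every worker
}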
