
How to Connect to an AWS EMR Notebook with Airflow

I want to connect my Airflow to an EMR notebook that is currently running on a cluster. I can connect to the AWS EMR cluster successfully, but I cannot connect to the notebook. Please help.

In the code below I load some files into an S3 bucket and then run a few steps on my cluster, which already works. But I also want to run a pre-made notebook on that EMR cluster, and that is the part I cannot connect to. Please help, thanks.

from datetime import datetime, timedelta

from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.S3_hook import S3Hook
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.operators.emr_create_job_flow_operator import (
    EmrCreateJobFlowOperator,
)
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.contrib.sensors.emr_step_sensor import EmrStepSensor
from airflow.contrib.operators.emr_terminate_job_flow_operator import (
    EmrTerminateJobFlowOperator,
)
# Configurations
BUCKET_NAME = "as*****************"  # replace this with your bucket name
local_data = "./dags/data/movie_review.csv"
s3_data = "data/movie_review.csv"
local_script = "./dags/scripts/spark/random_text_classification.py"
s3_script = "scripts/random_text_classification.py"
s3_clean = "clean_data/"
SPARK_STEPS = [ # Note the params values are supplied to the operator
    {
        "Name": "Move raw data from S3 to HDFS",
        "ActionOnFailure": "CANCEL_AND_WAIT",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "s3-dist-cp",
                "--src=s3://{{ params.BUCKET_NAME }}/data",
                "--dest=/movie",
            ],
        },
    },
    {
        "Name": "Classify movie reviews",
        "ActionOnFailure": "CANCEL_AND_WAIT",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--deploy-mode",
                "client",
                "s3://{{ params.BUCKET_NAME }}/{{ params.s3_script }}",
            ],
        },
    },
    {
        "Name": "Move clean data from HDFS to S3",
        "ActionOnFailure": "CANCEL_AND_WAIT",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "s3-dist-cp",
                "--src=/output",
                "--dest=s3://{{ params.BUCKET_NAME }}/{{ params.s3_clean }}",
            ],
        },
    },
]

# helper function
def _local_to_s3(filename, key, bucket_name=BUCKET_NAME):
    s3 = S3Hook()
    s3.load_file(filename=filename, bucket_name=bucket_name, replace=True, key=key)

default_args = {
    "owner": "airflow",
    "depends_on_past": True,
    "wait_for_downstream": True,
    "start_date": datetime(2020, 10, 17),
    "email": ["airflow@airflow.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
}
dag = DAG(
    "spark_submit_airflow",
    default_args=default_args,
    schedule_interval="0 10 * * *",
    max_active_runs=1,
)

start_data_pipeline = DummyOperator(task_id="start_data_pipeline", dag=dag)

data_to_s3 = PythonOperator(
    dag=dag,
    task_id="data_to_s3",
    python_callable=_local_to_s3,
    op_kwargs={"filename": local_data, "key": s3_data,},
)
script_to_s3 = PythonOperator(
    dag=dag,
    task_id="script_to_s3",
    python_callable=_local_to_s3,
    op_kwargs={"filename": local_script, "key": s3_script,},
)

# Add your steps to the EMR cluster
step_adder = EmrAddStepsOperator(
    task_id="add_steps",
    job_flow_id="j-***********", #cluster id
    aws_conn_id="aws_default",
    steps=SPARK_STEPS,
    params={ # these params are used to fill the parameterized values in SPARK_STEPS json
        "BUCKET_NAME": BUCKET_NAME,
        "s3_data": s3_data,
        "s3_script": s3_script,
        "s3_clean": s3_clean,
    },
    dag=dag,
)
last_step = len(SPARK_STEPS) - 1
# wait for the steps to complete
step_checker = EmrStepSensor(
    task_id="watch_step",
    job_flow_id="j-*************",#cluster ID
    step_id="{{ task_instance.xcom_pull(task_ids='add_steps', key='return_value')["
    + str(last_step)
    + "] }}",
    aws_conn_id="aws_default",
    dag=dag,
)

end_data_pipeline = DummyOperator(task_id="end_data_pipeline", dag=dag)

start_data_pipeline >> [data_to_s3, script_to_s3]  >> step_adder >> step_checker >> end_data_pipeline


I don't think there is an EMR operator for notebooks yet.

To run a pre-made EMR notebook, you can use the boto3 EMR client's start_notebook_execution method, passing the path of the pre-made notebook.

Write a custom Python callable that calls start_notebook_execution and use it in your pipeline through a PythonOperator. This callable needs a cluster ID; in your case that is the same job flow ID you already pass to EmrAddStepsOperator (step_adder), since you are running against an existing cluster.

import boto3

def start_nb_execution(cluster_id, **context):
    # NOTE: REGION, the notebook ID and the notebook file name below are
    # placeholders - replace them with your own values
    emr = boto3.client('emr', region_name=REGION)
    start_nb = emr.start_notebook_execution(
        EditorId="YOUR_NOTEBOOK_ID",
        RelativePath="YOUR_NOTEBOOK_FILE_NAME",
        ExecutionEngine={'Id': cluster_id, 'Type': 'EMR'},
        ServiceRole='EMR_Notebooks_DefaultRole'
    )
    execution_id = start_nb['NotebookExecutionId']
    print("Started an execution: " + execution_id)
    return execution_id
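
Because the function returns execution_id, the PythonOperator defined below will push it to XCom under its task_id, so a later task can pull it with task_instance.xcom_pull(task_ids='start_nb_execution') if it needs to check on the execution.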

Call this function from a PythonOperator:

start_nb_execution_task = PythonOperator(
    task_id='start_nb_execution',
    python_callable=start_nb_execution,
    provide_context=True,
    op_kwargs={"cluster_id": "j-***********"},  # the existing cluster ID, same as in step_adder
    dag=dag,
)

Now you can add it to the pipeline:

start_data_pipeline >> [data_to_s3, script_to_s3] >> step_adder >> step_checker >> start_nb_execution_task >> end_data_pipeline

There is a good tutorial here, which also includes an example of a sensor for the notebook execution.
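
Since this Airflow version has no built-in notebook sensor, here is a minimal sketch of what such a wait step could look like. It assumes the boto3 EMR client's describe_notebook_execution API and reuses the REGION placeholder and the boto3 import from the function above; the 30-second polling interval and the choice of terminal statuses are assumptions you may want to adjust.

import time

def wait_for_nb_execution(**context):
    # pull the execution id returned by start_nb_execution from XCom
    execution_id = context["task_instance"].xcom_pull(task_ids="start_nb_execution")
    emr = boto3.client("emr", region_name=REGION)
    while True:
        response = emr.describe_notebook_execution(NotebookExecutionId=execution_id)
        status = response["NotebookExecution"]["Status"]
        print("Notebook execution " + execution_id + " is " + status)
        if status == "FINISHED":
            return status
        if status in ("FAILED", "STOPPED"):
            raise Exception("Notebook execution ended with status " + status)
        time.sleep(30)  # poll every 30 seconds

wait_nb_execution = PythonOperator(
    task_id="wait_nb_execution",
    python_callable=wait_for_nb_execution,
    provide_context=True,
    dag=dag,
)

If you add it, it would go between start_nb_execution_task and end_data_pipeline in the pipeline above.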
