
How to deploy a custom model in Amazon SageMaker

I am new to AWS SageMaker, and I am trying to deploy a custom time-series LSTM model in SageMaker. Please help me with how to run it in script mode. Here is the code of my script file, time_series.py:

import argparse
import json
import os

import pandas as pd
from joblib import dump
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator


if __name__ == '__main__':

    # Hyperparameters are passed to the script as command-line arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=72)
    parser.add_argument('--n_train_hours', type=int, default=24 * 365 * 2)
    parser.add_argument('--n_validation_hours', type=int, default=24 * 365 * 4)

    # Input data and model directories.
    parser.add_argument('--model_dir', type=str)

    args, _ = parser.parse_known_args()

    # SageMaker mounts the input channels and output directories at these paths.
    train_dataset_dir = os.environ.get('SM_INPUT_DIR') + '/data/training/'
    output_model_dir = os.environ.get('SM_MODEL_DIR')
    output_object_dir = os.environ.get('SM_OUTPUT_DATA_DIR')

    epochs = args.epochs
    batch_size = args.batch_size

    dataset = pd.read_csv(train_dataset_dir + 'dataset.csv', header=0, index_col='Date')
    dataset.sort_index(inplace=True)
    train = dataset.iloc[:109]
    test = dataset.iloc[109:]

    # Fit the scaler on the training split only and reuse it for the test split.
    scaler = MinMaxScaler()
    scaled_train = scaler.fit_transform(train)
    scaled_test = scaler.transform(test)

    n_input = 12
    n_feature = 1

    train_generator = TimeseriesGenerator(scaled_train, scaled_train,
                                          length=n_input, batch_size=1)

    model = Sequential()
    model.add(LSTM(128, activation='relu', input_shape=(n_input, n_feature), return_sequences=True))
    model.add(LSTM(128, activation='relu', return_sequences=True))
    model.add(LSTM(128, activation='relu', return_sequences=False))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')

    # fit_generator takes its batch size from the generator, not as a keyword argument.
    history = model.fit_generator(train_generator, epochs=epochs, verbose=1)

    # Save the training history.
    with open(output_model_dir + '/history.json', 'w') as f:
        json.dump(history.history, f)
    # Save the scaler.
    dump(scaler, output_model_dir + '/scaler.model', protocol=2)
    # Save the trained model architecture and weights.
    model_json = model.to_json()
    with open(output_model_dir + '/model.json', 'w') as json_file:
        json_file.write(model_json)
    model.save_weights(output_model_dir + '/model.h5')

This is where it shows an error:

from sagemaker import get_execution_role
from sagemaker.tensorflow import TensorFlow

# s3Bucket and uploaded_data_path are defined earlier in the notebook.
train_instance_type = 'ml.m4.xlarge'

tf_estimator = TensorFlow(entry_point='time_series.py', role=get_execution_role(),
                          train_instance_count=1, train_instance_type=train_instance_type,
                          framework_version='1.12', py_version='py3', script_mode=True,
                          output_path='s3://' + s3Bucket, base_job_name='sales-forecasting-lstm',
                          hyperparameters={'batch_size': 2,
                                           'epochs': 50})

tf_estimator.fit(uploaded_data_path)

Here is the error I get. What does this error mean? I do not understand it.

UnexpectedStatusException: Error for Training job sales-forecasting-lstm-2020-04-13-10-17-34-919: Failed. Reason: AlgorithmError: ExecuteUserScriptError:
Command "/usr/bin/python time_series.py --batch_size 2 --epochs 50 --model_dir s3://sagemaker12/sales-forecasting-lstm-2020-04-13-10-17-34-919/model"


I suggest you change the channels in time_series.py to your S3 path; you do not have to build the path by concatenating onto train_dataset_dir. Also add the SageMaker-specific arguments, for example:

parser.add_argument('--output-data-dir', type=str, default='s3://bucket_name/folder_name/output')

The line above indicates where your output data should be stored. However, this is something you have to specify when you call the batch transform function; otherwise it will be stored in the default bucket. Secondly, in order to debug and to be able to help you, you have to look at CloudWatch to get a better idea of why your script, time_series.py, is failing. I suspect there is a problem with how your training data is specified and read.
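
As a minimal sketch of that suggestion (the channel name training and the env-var defaults follow SageMaker script-mode conventions; they are not taken from the post above), the entry point can read its paths from the environment variables the training container sets instead of hard-coding them:

import argparse
import os

# Sketch: take the data and output locations from the environment variables
# the SageMaker training container provides, instead of building them by hand.
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=72)
args, _ = parser.parse_known_args()

train_dir = os.environ.get('SM_CHANNEL_TRAINING')   # downloaded 'training' channel
model_dir = os.environ.get('SM_MODEL_DIR')          # packed into model.tar.gz in S3
output_dir = os.environ.get('SM_OUTPUT_DATA_DIR')   # packed into output.tar.gz in S3

# e.g. /opt/ml/input/data/training/dataset.csv inside the container
csv_path = os.path.join(train_dir, 'dataset.csv')

When fit() is called with a plain S3 URI, as in the snippet above, the SDK names the channel training by default, so SM_CHANNEL_TRAINING points at the downloaded copy of that prefix.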
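
On the CloudWatch point: the training container writes its stdout and stderr to the /aws/sagemaker/TrainingJobs log group, so the full Python traceback behind the ExecuteUserScriptError is visible there. A small sketch of fetching it with boto3 (the job-name prefix is taken from the error message above):

import boto3

logs = boto3.client('logs')
group = '/aws/sagemaker/TrainingJobs'

# Each training job writes one or more streams prefixed with the job name.
streams = logs.describe_log_streams(
    logGroupName=group,
    logStreamNamePrefix='sales-forecasting-lstm-2020-04-13-10-17-34-919')

for stream in streams['logStreams']:
    events = logs.get_log_events(logGroupName=group,
                                 logStreamName=stream['logStreamName'],
                                 startFromHead=True)
    for event in events['events']:
        print(event['message'])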
