
tensorflow tensorboard hparams

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

# Load the MNIST data and scale pixel values to [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Hyperparameters to sweep.
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([32, 64, 128, 256, 512]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.9))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['Nadam', 'SGD', 'RMSprop', 'adam', 'Adagrad']))
HP_L2 = hp.HParam('l2 regularizer', hp.RealInterval(1e-5, 1e-2))
HP_LeakyReLU = hp.HParam('alpha', hp.RealInterval(0.1, 0.9))
METRIC_ACCURACY = 'accuracy'

with tf.summary.create_file_writer('raw-img/log/hparam_tuning/').as_default():
  hp.hparams_config(
      hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER, HP_L2, HP_LeakyReLU],
      metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
  )
def train_test_model(hparams):
  model = tf.keras.models.Sequential([
      tf.keras.layers.Flatten(),
      # Use the swept L2 strength here; a hard-coded 0.001 would make
      # the HP_L2 sweep have no effect on the model.
      tf.keras.layers.Dense(hparams[HP_NUM_UNITS],
                            kernel_regularizer=tf.keras.regularizers.l2(hparams[HP_L2])),
      tf.keras.layers.LeakyReLU(hparams[HP_LeakyReLU]),
      tf.keras.layers.Dropout(hparams[HP_DROPOUT]),
      tf.keras.layers.Dense(10, activation='softmax'),
  ])
  model.compile(
      optimizer=hparams[HP_OPTIMIZER],
      loss='sparse_categorical_crossentropy',
      metrics=['accuracy'],
  )

  model.fit(x_train, y_train, epochs=2)
  _, accuracy = model.evaluate(x_test, y_test)
  return accuracy

def run(run_dir, hparams):
  with tf.summary.create_file_writer(run_dir).as_default():
    hp.hparams(hparams)  # record the values used in this trial
    accuracy = train_test_model(hparams)
    tf.summary.scalar(METRIC_ACCURACY, accuracy, step=2)

session_num = 0

# Grid search: Discrete domains are enumerated in full; for the
# RealInterval domains only the two endpoints are tried.
for num_units in HP_NUM_UNITS.domain.values:
  for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
    for l2 in (HP_L2.domain.min_value, HP_L2.domain.max_value):
      for alpha in (HP_LeakyReLU.domain.min_value, HP_LeakyReLU.domain.max_value):
        for optimizer in HP_OPTIMIZER.domain.values:
          hparams = {
              HP_NUM_UNITS: num_units,
              HP_DROPOUT: dropout_rate,
              HP_L2: l2,
              HP_LeakyReLU: alpha,
              HP_OPTIMIZER: optimizer,
          }
          run_name = "run-%d" % session_num
          print('--- Starting trial: %s' % run_name)
          print({h.name: hparams[h] for h in hparams})
          run('raw-img/log/hparam_tuning/' + run_name, hparams)
          session_num += 1

I have tried using hparams in TF. I set up dropout, l2 and OPTIMIZER.

I need to set values for learning_rate and test them. How can I set up learning_rate the same way as dropout and l2 and test it?

I tried doing this:

model.compile(
    optimizer=hparams[HP_OPTIMIZER](lr=0.001),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
but it doesn't work. I want to select different values for learning_rate, just as I do for dropout and l2.

You want to separate the optimizer out into its own variable. hparams[HP_OPTIMIZER] holds a name string such as 'adam', not an optimizer class, so calling it with a learning rate fails; build the optimizer from the name instead:

# Inside train_test_model, before compiling; HP_LR is a new hparam
# that holds the learning rate.
if hparams[HP_OPTIMIZER] == "SGD":
    optimizer = tf.keras.optimizers.SGD(learning_rate=float(hparams[HP_LR]))
elif hparams[HP_OPTIMIZER] == "adam":
    optimizer = tf.keras.optimizers.Adam(learning_rate=float(hparams[HP_LR]))
else:
    raise ValueError("unexpected optimizer name: %r" % hparams[HP_OPTIMIZER])
model.compile(
    optimizer=optimizer,
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

I found the solution here.
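For completeness, a minimal sketch of how learning_rate can be swept exactly like dropout and l2. The name HP_LR, its candidate values, and the build_optimizer helper are illustrative choices, not from the original post; everything else reuses the definitions from the question:

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

# Assumed hparam: the learning rates you want to try.
HP_LR = hp.HParam('learning_rate', hp.Discrete([1e-2, 1e-3, 1e-4]))

def build_optimizer(hparams):
    # Map the optimizer-name string to an optimizer instance built
    # with the learning rate sampled for this trial.
    optimizers = {
        'SGD': tf.keras.optimizers.SGD,
        'adam': tf.keras.optimizers.Adam,
        'Nadam': tf.keras.optimizers.Nadam,
        'RMSprop': tf.keras.optimizers.RMSprop,
        'Adagrad': tf.keras.optimizers.Adagrad,
    }
    name = hparams[HP_OPTIMIZER]
    if name not in optimizers:
        raise ValueError("unexpected optimizer name: %r" % name)
    return optimizers[name](learning_rate=float(hparams[HP_LR]))

To use it, pass optimizer=build_optimizer(hparams) to model.compile in train_test_model, add HP_LR to the hparams list in hp.hparams_config, and give the sweep one more loop level: for lr in HP_LR.domain.values, with HP_LR: lr added to the hparams dict.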
