
How to tune hyperparameters for CIFAR100 in tensorflow_federated (TFF) without the accuracy dropping?

I am trying to test this tutorial https://www.tensorflow.org/federated/tutorials/tff_for_federated_learning_research_compression with the CIFAR100 dataset, but the accuracy drops every round.

Is my tuning of the hyperparameters the cause?

Here is my code:

cifar_train, cifar_test = tff.simulation.datasets.cifar100.load_data()

MAX_CLIENT_DATASET_SIZE = 418

CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500

def reshape_cifar_element(element):
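  # Note: expand_dims appends a trailing axis, so a (32, 32, 3) CIFAR image
  # becomes (32, 32, 3, 1); in the EMNIST tutorial this step turns (28, 28)
  # images into (28, 28, 1).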
  return (tf.expand_dims(element['image'], axis=-1), element['label'])

def preprocess_train_dataset(dataset):
  """Preprocessing function for the EMNIST training dataset."""
  return (dataset
          # Shuffle according to the largest client dataset
          .shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
          # Repeat to do multiple local epochs
          .repeat(CLIENT_EPOCHS_PER_ROUND)
          # Batch to a fixed client batch size
          .batch(CLIENT_BATCH_SIZE, drop_remainder=False)
          # Preprocessing step
          .map(reshape_cifar_element))

cifar_train = cifar_train.preprocess(preprocess_train_dataset)

# defining a model 
def create_original_fedavg_cnn_model():
  data_format = 'channels_last'

  max_pool = functools.partial(
      tf.keras.layers.MaxPooling2D,
      pool_size=(2, 2),
      padding='same',
      data_format=data_format)
  conv2d = functools.partial(
      tf.keras.layers.Conv2D,
      kernel_size=5,
      padding='same',
      data_format=data_format,
      activation=tf.nn.relu)

  model = tf.keras.models.Sequential([
      tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
      conv2d(filters=32),
      max_pool(),
      conv2d(filters=64),
      max_pool(),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(512, activation=tf.nn.relu),
      tf.keras.layers.Dense(100, activation=None),
      tf.keras.layers.Softmax(),
  ])
  return model

input_spec = cifar_train.create_tf_dataset_for_client(
    cifar_train.client_ids[0]).element_spec

def tff_model_fn():
  keras_model = create_original_fedavg_cnn_model()
  return tff.learning.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(),
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# training the model 
federated_averaging = tff.learning.build_federated_averaging_process(
    model_fn=tff_model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.05),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))

# utility function
def format_size(size):
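  """A helper function for creating a human-readable size."""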
  size = float(size)
  for unit in ['bit','Kibit','Mibit','Gibit']:
    if size < 1024.0:
      return "{size:3.2f}{unit}".format(size=size, unit=unit)
    size /= 1024.0
  return "{size:.2f}{unit}".format(size=size, unit='TiB')

def set_sizing_environment():
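  """Creates an environment that contains sizing information."""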
  sizing_factory = tff.framework.sizing_executor_factory()
  context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
  tff.framework.set_default_context(context)

  return sizing_factory

def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
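  """Trains the federated averaging process and outputs metrics."""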
  environment = set_sizing_environment()

  # Initialize the Federated Averaging algorithm to get the initial server state.
  state = federated_averaging_process.initialize()

  with summary_writer.as_default():
    for round_num in range(num_rounds):
      # Sample the clients participating in this round.
      sampled_clients = np.random.choice(
          cifar_train.client_ids,
          size=num_clients_per_round,
          replace=False)
      # Create a list of `tf.data.Dataset` instances from the data of sampled clients.
      sampled_train_data = [
          cifar_train.create_tf_dataset_for_client(client)
          for client in sampled_clients
      ]
      state, metrics = federated_averaging_process.next(state, sampled_train_data)

      size_info = environment.get_size_info()
      broadcasted_bits = size_info.broadcast_bits[-1]
      aggregated_bits = size_info.aggregate_bits[-1]

      print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))

      # Add metrics to Tensorboard.
      for name, value in metrics['train'].items():
          tf.summary.scalar(name, value, step=round_num)

      # Add broadcasted and aggregated data size to Tensorboard.
      tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
      tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
      summary_writer.flush()

# Clean the log directory to avoid conflicts.
try:
  tf.io.gfile.rmtree('/tmp/logs/scalars')
except tf.errors.OpError as e:
  pass  # Path doesn't exist

# Set up the log directory and writer for Tensorboard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)

train(federated_averaging_process=federated_averaging, num_rounds=10,
      num_clients_per_round=100, summary_writer=summary_writer)

Here is the output:

round  0, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0299), ('loss', 15.586388), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=6.56Gibit, aggregated_bits=6.56Gibit
round  1, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0046), ('loss', 16.042076), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=13.13Gibit, aggregated_bits=13.13Gibit
round  2, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0107), ('loss', 15.945647), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=19.69Gibit, aggregated_bits=19.69Gibit
round  3, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0104), ('loss', 15.950482), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=26.26Gibit, aggregated_bits=26.26Gibit
round  4, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0115), ('loss', 15.932754), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=32.82Gibit, aggregated_bits=32.82Gibit
round  5, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0111), ('loss', 15.9391985), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=39.39Gibit, aggregated_bits=39.39Gibit
round  6, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0112), ('loss', 15.937586), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=45.95Gibit, aggregated_bits=45.95Gibit
round  7, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.012), ('loss', 15.924692), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=52.52Gibit, aggregated_bits=52.52Gibit
round  8, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0105), ('loss', 15.948869), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=59.08Gibit, aggregated_bits=59.08Gibit
round  9, metrics=OrderedDict([('broadcast', ()), ('aggregation', OrderedDict([('mean_value', ()), ('mean_weight', ())])), ('train', OrderedDict([('sparse_categorical_accuracy', 0.0096), ('loss', 15.963377), ('num_examples', 10000), ('num_batches', 500)]))]), broadcasted_bits=65.64Gibit, aggregated_bits=65.64Gibit

Here is the input structure:

OrderedDict([('coarse_label', TensorSpec(shape=(), dtype=tf.int64, name=None)), ('image', TensorSpec(shape=(32, 32, 3), dtype=tf.uint8, name=None)), ('label', TensorSpec(shape=(), dtype=tf.int64, name=None))])

I don't know where my mistake is!

  • Are the hyperparameters defined in the layers of create_original_fedavg_cnn_model() wrong? Or is the problem in preprocess_train_dataset()?

  • How can I tune the parameters of this same tutorial for the CIFAR100 dataset?

Any help is appreciated! Thank you.

Here are a few notes:

  1. Since it seems to be training (albeit not very well), some of the most salient hyperparameters to look at are the optimizer learning rates. You may want to experiment with other hyperparameters there, or even with other optimizers.

  2. The CNN model you are using is quite small and may not do well on the whole of CIFAR-100. One useful thing to do is to first try training the model on the dataset in a centralized fashion (as a sanity check) and then move on to federated training.

  3. A good rule of thumb for initializing the hyperparameter settings is to take an optimizer and hyperparameters that work well in centralized training (see point 2), use them as the client optimizer, and keep the server optimizer as SGD with a learning rate of 1. This may not be optimal, but it can often do quite well. A sketch of both steps follows this list.
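As a concrete illustration of points 2 and 3, here is a minimal sketch that reuses create_original_fedavg_cnn_model() and tff_model_fn from the question. The SGD learning rate and momentum below are placeholder values meant to illustrate the workflow, not verified recommendations:

import tensorflow as tf
import tensorflow_federated as tff

# --- Step 1: centralized sanity check (point 2). ---
# Load CIFAR-100 centrally via Keras; labels have shape (N, 1), which
# SparseCategoricalCrossentropy accepts.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data()
x_train = x_train.astype('float32') / 255.0  # normalize pixel values
x_test = x_test.astype('float32') / 255.0

centralized_model = create_original_fedavg_cnn_model()
centralized_model.compile(
    # Placeholder optimizer settings; tune these centrally first.
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
centralized_model.fit(x_train, y_train, batch_size=20, epochs=5,
                      validation_data=(x_test, y_test))

# --- Step 2: reuse the centrally tuned optimizer in federated training (point 3). ---
federated_averaging = tff.learning.build_federated_averaging_process(
    model_fn=tff_model_fn,
    # Client optimizer: whatever worked well in the centralized check.
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(
        learning_rate=0.01, momentum=0.9),
    # Server optimizer: plain SGD with learning rate 1.0, per the rule of thumb.
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))

If the centralized run also fails to learn, the problem lies in the model or the preprocessing rather than in the federated setup, and that should be fixed before any federated tuning.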

Unfortunately, model training is still an art rather than a science, and federated training can differ from centralized training, so some trial and error is likely to be needed. Good luck.
