
How to convert a 4D numpy array of type int32 to tfrecords?

I have a hyperspectral dataset, which is a numpy array with dimensions (num_images, height=7, width=7, num_channels=144) and dtype int32.

The labels array has shape (batch_size, num_classes=15). I want to convert the data to tf.records and read it back correctly.

So far I have read many blogs and tried many different approaches, all of which failed. Here is what I tried.

The problem is that the code raises no error when I use it to train the model, but the accuracy results make no sense when I compare them with the results of training the model directly on the numpy arrays.

The question is: where did I go wrong in my code? Is the error introduced when converting to tfrecords and reading them back?

import os
import numpy as np
import tensorflow as tf


def wrap_int64(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def wrap_bytes(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def convert(images, labels, save_path, save_name):
    """
    :param images: np.ndarray containing images with shape (num_images,
        height, width, num_channels)
    :param labels: np.ndarray containing labels with shape (num_labels,),
        i.e. one_hot=False
    :param save_path: path in which we save the tfrecords
    :return:
    """
    out_path = os.path.join(save_path, save_name)
    print("Converting: " + out_path)

    assert images.dtype == np.int32

    # Number of images
    num_images = len(images)
    print(num_images)

    with tf.python_io.TFRecordWriter(out_path) as writer:
        for i in range(num_images):

            # Load a single image and its label
            img = images[i]
            label = labels[i]

            # Store the image shape so it can be recovered after parsing
            image_shape = np.array(np.shape(img)).astype(np.int32)

            # Convert the image and its shape to raw bytes.
            # There is no need to flatten each image!
            img_bytes = img.tostring()
            img_shape_bytes = image_shape.tostring()

            # Create a dict with the data we want to save in the
            # TFRecords file. You can add more relevant data here.
            data = \
                {
                    'image': wrap_bytes(tf.compat.as_bytes(img_bytes)),
                    'image_shape': wrap_bytes(tf.compat.as_bytes(img_shape_bytes)),
                    'label': wrap_int64(label)
                }

            # Wrap the data as TensorFlow Features.
            feature = tf.train.Features(feature=data)

            # Wrap again as a TensorFlow Example.
            example = tf.train.Example(features=feature)

            # Serialize the data.
            serialized = example.SerializeToString()

            # Write the serialized data to the TFRecords file.
            writer.write(serialized)
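For context, the convert function above would be called on the arrays described in the question roughly as follows, assuming the labels are passed as integer class indices (one_hot=False, as the docstring requires); train_images, train_labels and the file names are hypothetical:

# train_images: int32 array of shape (num_images, 7, 7, 144)
# train_labels: integer class indices of shape (num_images,)
convert(train_images, train_labels, save_path='./data', save_name='train.tfrecords')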
def input_fn(filenames, num_classes, normalization_factor, train, batch_size=1024, prefetch_buffer_size=5):

    buffer_size = 10 * batch_size

    dataset = tf.data.TFRecordDataset(filenames=filenames)

    dataset = dataset.map(lambda x: parse(x, num_classes, normalization_factor))

    if train:
        dataset = dataset.shuffle(buffer_size=buffer_size)

        # Allow infinite reading of the data.
        num_repeat = None
    else:
        num_repeat = 1

    # Repeat the dataset the given number of times.
    dataset = dataset.repeat(num_repeat)

    # Get a batch of data with the given size.
    dataset = dataset.batch(batch_size)

    dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)

    # Create an iterator for the dataset and the above modifications.
    iterator = dataset.make_one_shot_iterator()

    # Get the next batch of images and labels.
    batch_images_tf, batch_labels_tf = iterator.get_next()

    return batch_images_tf, batch_labels_tf
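The parse function that input_fn maps over the dataset is not shown in the question. A minimal sketch of what it would have to do, assuming it simply undoes the serialization performed by convert (same feature keys, raw int32 bytes decoded and reshaped, pixel values scaled by normalization_factor, and the scalar label one-hot encoded into num_classes classes), is:

def parse(serialized, num_classes, normalization_factor):
    # The feature description must match the keys written by convert().
    features = {
        'image': tf.FixedLenFeature([], tf.string),
        'image_shape': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    parsed = tf.parse_single_example(serialized=serialized, features=features)

    # Decode the raw bytes back to int32, exactly as they were written.
    image_shape = tf.decode_raw(parsed['image_shape'], tf.int32)
    image = tf.decode_raw(parsed['image'], tf.int32)
    image = tf.reshape(image, image_shape)

    # Scale pixel values and one-hot encode the label.
    image = tf.cast(image, tf.float32) / normalization_factor
    label = tf.one_hot(parsed['label'], depth=num_classes)

    return image, label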


You will need to use tf.train.Feature, for example (assuming your labels are ints).

For int values:

def _int64_feature(value):
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

For bytes:

def _bytes_feature(value):
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

Then:

 data = {'label': _int64_feature(label),
         'image': _bytes_feature(tf.compat.as_bytes(img_bytes))}

should do the job.
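For completeness, a short sketch of how these helpers plug into writing one record, mirroring the convert function above (img here stands for a single int32 image array, label for an integer class index, and writer for an open TFRecordWriter):

img_bytes = img.tostring()

data = {'label': _int64_feature(label),
        'image': _bytes_feature(tf.compat.as_bytes(img_bytes))}

example = tf.train.Example(features=tf.train.Features(feature=data))
writer.write(example.SerializeToString())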

# load training set and test set
batch_images_tf, batch_labels_tf = \
    input_fn(
        filenames_train,
        FLAGS.num_classes,
        normalization_factor=FLAGS.normalization_factor,
        train=True,
        batch_size=FLAGS.batch_size,
        prefetch_buffer_size=5)

The wrong way!!!!

batch_images, batch_labels = sess.run(batch_images_tf),sess.run(batch_labels_tf)

The right way!!!!

batch_images, batch_labels = sess.run([batch_images_tf, batch_labels_tf])
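Each call to sess.run advances the one-shot iterator by one batch, so running batch_images_tf and batch_labels_tf in two separate calls pairs the images of one batch with the labels of the next, which is why the accuracy looked meaningless. Fetching both tensors in a single sess.run keeps them aligned, for example inside the training loop (num_steps, train_op, x and y are placeholder names for whatever the rest of the training code uses):

with tf.Session() as sess:
    for step in range(num_steps):
        # One sess.run call advances the iterator once, so the images and
        # labels below come from the same batch.
        batch_images, batch_labels = sess.run([batch_images_tf, batch_labels_tf])
        # e.g. sess.run(train_op, feed_dict={x: batch_images, y: batch_labels})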
