import os
import tensorflow as tf

def store(tfrecord_file, filenames, labels):
    with tf.io.TFRecordWriter(tfrecord_file) as writer:
        for filename, label in zip(filenames, labels):
            image = open(filename, 'rb').read()    # read the raw JPEG bytes
            feature = {
                'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
            }
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())
data_dir = "./cats_vs_dogs/sample"
train_cats_dir = data_dir + "/train/cats/"
train_dogs_dir = data_dir + "/train/dogs/"
train_tfrecord_file = "./cats_vs_dogs/train.tfrecords"
train_cat_filenames = [train_cats_dir + filename for filename in os.listdir(train_cats_dir)]
train_dog_filenames = [train_dogs_dir + filename for filename in os.listdir(train_dogs_dir)]
train_filenames = train_cat_filenames + train_dog_filenames
train_labels = [0] * len(train_cat_filenames) + [1] * len(train_dog_filenames)    # cat: 0, dog: 1

test_cats_dir = data_dir + "/valid/cats/"
test_dogs_dir = data_dir + "/valid/dogs/"
test_tfrecord_file = "./cats_vs_dogs/test.tfrecords"
test_cat_filenames = [test_cats_dir + filename for filename in os.listdir(test_cats_dir)]
test_dog_filenames = [test_dogs_dir + filename for filename in os.listdir(test_dogs_dir)]
test_filenames = test_cat_filenames + test_dog_filenames
test_labels = [0] * len(test_cat_filenames) + [1] * len(test_dog_filenames)
store(train_tfrecord_file, train_filenames, train_labels)
store(test_tfrecord_file, test_filenames, test_labels)
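As a quick sanity check (not part of the original post), you can read the freshly written files back and count the serialized records; the counts should match the lengths of the filename lists:

# Hypothetical verification step: count the records in each TFRecord file.
for path, expected in [(train_tfrecord_file, len(train_filenames)),
                       (test_tfrecord_file, len(test_filenames))]:
    n_records = sum(1 for _ in tf.data.TFRecordDataset(path))
    print(path, ":", n_records, "records, expected", expected)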
train_tfrecord_file = "./cats_vs_dogs/train.tfrecords"
test_tfrecord_file = "./cats_vs_dogs/test.tfrecords"
train_raw_dataset = tf.data.TFRecordDataset(train_tfrecord_file)
test_raw_dataset = tf.data.TFRecordDataset(test_tfrecord_file)

# Describe the features stored in each serialized tf.train.Example.
feature_description = {
    'image': tf.io.FixedLenFeature([], tf.string),
    'label': tf.io.FixedLenFeature([], tf.int64)
}

def _parse_example(example_string):
    feature_dict = tf.io.parse_single_example(example_string, feature_description)
    feature_dict['image'] = tf.io.decode_jpeg(feature_dict['image'])    # decode the JPEG image
    feature_dict['image'] = tf.image.resize(feature_dict['image'], [256, 256]) / 255.0
    return feature_dict['image'], feature_dict['label']

# _parse_example must be defined before it is passed to map().
train_dataset = train_raw_dataset.map(_parse_example)
test_dataset = test_raw_dataset.map(_parse_example)
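To confirm that decoding and resizing behave as intended, a minimal check (not from the original post, assuming matplotlib is available) is to plot one parsed example before batching:

import matplotlib.pyplot as plt

# Hypothetical check: display the first parsed image and its label.
for image, label in train_dataset.take(1):
    plt.title('cat' if label.numpy() == 0 else 'dog')
    plt.imshow(image.numpy())
    plt.show()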
Preprocessing the training dataset:

buffer_size = 10000    # example value; the original post does not specify it
batch_size = 32        # example value; the original post does not specify it
train_dataset = train_dataset.shuffle(buffer_size=buffer_size).batch(batch_size=batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

# If the test dataset is too large, it needs the same treatment; otherwise an error will be raised.
test_dataset = test_dataset.shuffle(buffer_size=buffer_size).batch(batch_size=batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
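With both datasets batched, they can be fed straight into a tf.keras model. The network below is only a sketch, assuming a small binary-classification CNN; the original post does not specify a model architecture, so the layers, epochs, and optimizer here are illustrative choices:

# Hypothetical model: a small CNN for the 256x256x3 images produced by _parse_example.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(256, 256, 3)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(32, 3, activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')    # two classes: cat (0) and dog (1)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['sparse_categorical_accuracy'])
model.fit(train_dataset, epochs=5)
print(model.evaluate(test_dataset))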