TensorFlow 常用模块

老师您好,我在运行 cats_vs_dogs 图像分类时,显示 UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd5 in position 150: invalid continuation byte,是为什么?我的代码如下:

# Training hyperparameters for the cats-vs-dogs classifier.
num_epochs = 10
batch_size = 32
learning_rate = 0.001
# Raw string: backslashes in the Windows path must not be treated as escape
# sequences ('E:\datasets' only works by accident because '\d' is not a
# recognized escape; r'...' makes it explicit and warning-free).
data_dir = r'E:\datasets\cats_vs_dogs'
# Trailing '/' so that building file paths by plain concatenation
# (directory + filename) yields a valid path instead of '.../catscat.0.jpg'.
train_cats_dir = data_dir + '/train/cats/'
train_dogs_dir = data_dir + '/train/dogs/'
test_cats_dir = data_dir + '/valid/cats/'
test_dogs_dir = data_dir + '/valid/dogs/'

def _decode_and_resize(filename, label):
    """Read the JPEG at `filename`, resize it to 256x256 and scale to [0, 1].

    Intended for use with `tf.data.Dataset.map`; returns the
    (image, label) pair with the label passed through unchanged.
    """
    raw_bytes = tf.io.read_file(filename)
    image = tf.image.decode_jpeg(raw_bytes)
    image = tf.image.resize(image, [256, 256]) / 255.0
    return image, label

if __name__ == '__main__':
    # Build full file paths with os.path.join: the original bare concatenation
    # (train_cats_dir + filename) omitted the path separator, producing
    # invalid paths such as '.../train/catscat.0.jpg'.
    # NOTE(review): the reported UnicodeDecodeError (byte 0xd5) typically
    # indicates a non-UTF-8 (e.g. GBK) encoded path component or a non-JPEG
    # file inside the folder — verify the dataset directory contains only
    # ASCII-named .jpg files.
    train_cat_filenames = tf.constant(
        [os.path.join(train_cats_dir, name) for name in os.listdir(train_cats_dir)])
    train_dog_filenames = tf.constant(
        [os.path.join(train_dogs_dir, name) for name in os.listdir(train_dogs_dir)])
    train_filenames = tf.concat([train_cat_filenames, train_dog_filenames], axis=-1)
    # Labels: 0 = cat, 1 = dog (matches the concatenation order above).
    train_labels = tf.concat([
        tf.zeros(train_cat_filenames.shape, dtype=tf.int32),
        tf.ones(train_dog_filenames.shape, dtype=tf.int32)],
        axis=-1)

    # Input pipeline: decode/resize in parallel, shuffle, batch, prefetch.
    train_datasets = tf.data.Dataset.from_tensor_slices((train_filenames, train_labels))
    train_datasets = train_datasets.map(
        map_func=_decode_and_resize,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # buffer_size of 23000 presumably covers the whole training set — confirm.
    train_datasets = train_datasets.shuffle(buffer_size=23000)
    train_datasets = train_datasets.batch(batch_size)
    train_datasets = train_datasets.prefetch(tf.data.experimental.AUTOTUNE)

    # Small CNN: two conv/pool stages, then a 2-way softmax classifier.
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(256, 256, 3)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(32, 5, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(2, activation='softmax')
    ])

    # Integer labels + softmax output -> sparse categorical loss/metric.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        metrics=[tf.keras.metrics.sparse_categorical_accuracy]
    )

    model.fit(train_datasets, epochs=num_epochs)