I have the following code, which runs a generative adversarial network (GAN) on 374 training images of size 32x32.
Why do I get the following error?
ValueError: Input arrays should have the same number of samples as target arrays. Found 7500 input samples and 40 target samples.
It is raised at this statement:
discriminator_loss = discriminator.train_on_batch(combined_images,labels)
import keras
from keras import layers
import numpy as np
import cv2
import os
from keras.preprocessing import image
latent_dimension = 32
height = 32
width = 32
channels = 3
iterations = 100000
batch_size = 20
real_images = []
train_directory = '/training'
results_directory = '/results'
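# generator network: maps a 32-dimensional latent vector to a 32x32x3 image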
generator_input = keras.Input(shape=(latent_dimension,))
x = layers.Dense(128*16*16)(generator_input)
x = layers.LeakyReLU()(x)
x = layers.Reshape((16,16,128))(x)
x = layers.Conv2D(256,5,padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2DTranspose(256,4,strides=2,padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(256,5,padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(256,5,padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(channels,7,activation='tanh',padding='same')(x)
generator = keras.models.Model(generator_input,x)
generator.summary()
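# discriminator network: classifies 32x32x3 images as real or generated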
discriminator_input = layers.Input(shape=(height,width,channels))
x = layers.Conv2D(128,3)(discriminator_input)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128,4,strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128,4,strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128,4,strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Flatten()(x)
x = layers.Dropout(0.4)(x)
x = layers.Dense(1,activation='sigmoid')(x)
discriminator = keras.models.Model(discriminator_input,x)
discriminator.summary()
discriminator_optimizer = keras.optimizers.RMSprop(
    lr=0.0008,
    clipvalue=1.0,
    decay=1e-8)
discriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy')
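# freeze the discriminator's weights inside the combined gan model,
# so that gan.train_on_batch only updates the generator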
discriminator.trainable = False
gan_input = keras.Input(shape=(latent_dimension,))
gan_output = discriminator(generator(gan_input))
gan = keras.models.Model(gan_input,gan_output)
gan_optimizer = keras.optimizers.RMSprop(
    lr=0.0004,
    clipvalue=1.0,
    decay=1e-8)
gan.compile(optimizer=gan_optimizer,loss='binary_crossentropy')
start = 0
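# adversarial training loop: alternately train the discriminator on a mix of
# real and generated images, then train the generator through the gan model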
for step in range(iterations):
    random_latent_vectors = np.random.normal(size=(batch_size,latent_dimension))
    generated_images = generator.predict(random_latent_vectors)
    stop = start + batch_size
    i = start
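    # read real training images from the training directory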
    for root, dirs, files in os.walk(train_directory):
        for file in files:
            for i in range(stop-start):
                img = cv2.imread(root + '/' + file)
                real_images.append(img)
                i = i+1
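    # combine generated and real images and build the matching labels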
    combined_images = np.concatenate([generated_images,real_images])
    labels = np.concatenate([np.ones((batch_size,1)),np.zeros((batch_size,1))])
    labels = labels + 0.05 * np.random.random(labels.shape)
    discriminator_loss = discriminator.train_on_batch(combined_images,labels)
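    # train the generator through the gan model, using misleading targets
    # that label the generated images as real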
    random_latent_vectors = np.random.normal(size=(batch_size,latent_dimension))
    misleading_targets = np.zeros((batch_size,1))
    adversarial_loss = gan.train_on_batch(random_latent_vectors,misleading_targets)
    start = start + batch_size
    if start > len(train_directory)-batch_size:
        start = 0
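    # every 100 steps, save the weights, print the losses, and write one
    # generated and one real sample image to the results directory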
    if step % 100 == 0:
        gan.save_weights('gan.h5')
        print 'discriminator loss: '
        print discriminator_loss
        print 'adversarial loss: '
        print adversarial_loss
        img = image.array_to_img(generated_images[0] * 255.)
        img.save(os.path.join(results_directory,'generated_melanoma_image' + str(step) + '.png'))
        img = image.array_to_img(real_images[0] * 255.)
        img.save(os.path.join(results_directory,'real_melanoma_image' + str(step) + '.png'))
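For reference, the error itself only says that the image array and the label array passed to train_on_batch have different numbers of samples along the first axis. A minimal standalone sketch (using a hypothetical one-layer toy model, not my actual network) reproduces the same kind of ValueError with the Keras version that produced the message above:

import numpy as np
import keras
from keras import layers

# toy model: 4 input features -> 1 sigmoid output
inp = keras.Input(shape=(4,))
out = layers.Dense(1, activation='sigmoid')(inp)
toy = keras.models.Model(inp, out)
toy.compile(optimizer='rmsprop', loss='binary_crossentropy')

x = np.random.random((30, 4))   # 30 input samples
y = np.zeros((40, 1))           # 40 target samples -> mismatch
toy.train_on_batch(x, y)        # raises a "same number of samples" ValueError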
Thank you.