@@ -16,23 +16,29 @@ from keras.layers import Dense, Dropout, Input
 from keras.models import Model, Sequential
 from keras.layers.advanced_activations import LeakyReLU
 from tensorflow.keras.optimizers import Adam
+import tensorflow as tf
 
 
 class SimpleGan(GanBaseClass):
     """
     A class for a simple GAN.
     """
-    def __init__(self, numOfFeatures=786, noiseSize=100, epochs=3, batchSize=128):
+    def __init__(self, numOfFeatures=786, noiseSize=None, epochs=3, batchSize=128, withTanh=False, gLayers=None, dLayers=None):
         self.isTrained = False
-        self.noiseSize = noiseSize
+        self.noiseSize = noiseSize if noiseSize is not None else numOfFeatures
         self.numOfFeatures = numOfFeatures
         self.epochs = epochs
         self.batchSize = batchSize
+        self.scaler = 1.0
+        self.withTanh = withTanh
+        self.dLayers = dLayers if dLayers is not None else [1024, 512, 256]
+        self.gLayers = gLayers if gLayers is not None else [256, 512, 1024]
 
     def reset(self):
         """
         Resets the trained GAN to a random state.
         """
+        self.scaler = 1.0
         self.generator = self._createGenerator(self.numOfFeatures, self.noiseSize)
         self.discriminator = self._createDiscriminator(self.numOfFeatures)
         self.gan = self._createGan(self.noiseSize)
@@ -52,32 +58,34 @@ class SimpleGan(GanBaseClass):
 
     def _createGenerator(self, numOfFeatures, noiseSize):
         generator=Sequential()
-        generator.add(Dense(units=256, input_dim=noiseSize))
-        generator.add(LeakyReLU(0.2))
+        for (n, size) in enumerate(self.gLayers):
+            if n == 0:
+                generator.add(Dense(units=size, input_dim=noiseSize))
+                generator.add(LeakyReLU(0.2))
+            else:
+                generator.add(Dense(units=size))
+                generator.add(LeakyReLU(0.2))
 
-        generator.add(Dense(units=512))
-        generator.add(LeakyReLU(0.2))
-        generator.add(Dense(units=1024))
-        generator.add(LeakyReLU(0.2))
-
-        generator.add(Dense(units=numOfFeatures, activation='tanh'))
+        if self.withTanh:
+            generator.add(Dense(units=numOfFeatures, activation='tanh'))
+        else:
+            generator.add(Dense(units=numOfFeatures, activation='softsign'))
 
         generator.compile(loss='binary_crossentropy', optimizer=self._adamOptimizer())
         return generator
 
     def _createDiscriminator(self, numOfFeatures):
         discriminator=Sequential()
-        discriminator.add(Dense(units=1024, input_dim=numOfFeatures))
-        discriminator.add(LeakyReLU(0.2))
-        discriminator.add(Dropout(0.3))
-
-        discriminator.add(Dense(units=512))
-        discriminator.add(LeakyReLU(0.2))
-        discriminator.add(Dropout(0.3))
-        discriminator.add(Dense(units=256))
-        discriminator.add(LeakyReLU(0.2))
+        for (n, size) in enumerate(self.dLayers):
+            if n == 0:
+                discriminator.add(Dense(units=size, input_dim=numOfFeatures))
+                discriminator.add(LeakyReLU(0.2))
+            else:
+                discriminator.add(Dropout(0.3))
+                discriminator.add(Dense(units=size))
+                discriminator.add(LeakyReLU(0.2))
 
         discriminator.add(Dense(units=1, activation='sigmoid'))
@@ -91,6 +99,13 @@ class SimpleGan(GanBaseClass):
         if trainDataSize <= 0:
             raise AttributeError("Train GAN: Expected data class 1 to contain at least one point.")
 
+        if self.withTanh:
+            self.scaler = 1.0
+            scaleDown = 1.0
+        else:
+            self.scaler = max(1.0, 1.1 * tf.reduce_max(tf.abs(trainData)).numpy())
+            scaleDown = 1.0 / self.scaler
+
         for e in range(self.epochs):
             print(f"Epoch {e + 1}/{self.epochs}")
             for _ in range(self.batchSize):
@@ -98,15 +113,15 @@ class SimpleGan(GanBaseClass):
                 noise= np.random.normal(0, 1, [self.batchSize, self.noiseSize])
 
                 # Generate fake MNIST images from noised input
-                generatedImages = self.generator.predict(noise)
+                syntheticBatch = self.generator.predict(noise)
 
                 # Get a random set of real images
-                image_batch = dataset.data1[
+                realBatch = dataset.data1[
                     np.random.randint(low=0, high=trainDataSize, size=self.batchSize)
                 ]
 
                 #Construct different batches of real and fake data
-                X = np.concatenate([image_batch, generatedImages])
+                X = np.concatenate([scaleDown * realBatch, syntheticBatch])
 
                 # Labels for generated and real data
                 y_dis=np.zeros(2 * self.batchSize)
@@ -139,4 +154,4 @@ class SimpleGan(GanBaseClass):
         noise = np.random.normal(0, 1, [numOfSamples, self.noiseSize])
 
         # Generate fake MNIST images from noised input
-        return self.generator.predict(noise)
+        return self.scaler * self.generator.predict(noise)
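
The scaling introduced above exists because a softsign (or tanh) output layer bounds the generator to (-1, 1): real samples are multiplied by scaleDown = 1.0 / self.scaler before they are mixed with generator output, and self.scaler maps generated samples back onto the original feature scale when sampling. A minimal NumPy sketch of that round trip; the variable names and the data range below are illustrative, not taken from this patch:

    import numpy as np

    # Illustrative stand-ins; the real code uses trainData and generator.predict(noise).
    trainData = np.random.uniform(-37.0, 37.0, size=(256, 786))   # assumed raw feature range
    fakeOutput = np.random.uniform(-1.0, 1.0, size=(128, 786))    # stand-in for generator output in (-1, 1)

    # Mirrors the patch, with NumPy in place of tf.reduce_max / tf.abs.
    scaler = max(1.0, 1.1 * np.max(np.abs(trainData)))
    scaleDown = 1.0 / scaler

    realBatch = trainData[:128]
    discriminatorInput = scaleDown * realBatch   # real data squeezed into (-1, 1), same range as fakeOutput
    samples = scaler * fakeOutput                # generated data expanded back to the original scale

    assert np.all(np.abs(discriminatorInput) < 1.0)
    assert np.max(np.abs(samples)) <= scaler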