|
|
@@ -46,6 +46,7 @@ class ConvGAN(GanBaseClass):
|
|
|
self.maj_min_discriminator = None
|
|
|
self.withMajorhoodNbSearch = withMajorhoodNbSearch
|
|
|
self.cg = None
|
|
|
+ self.canPredict = True
|
|
|
|
|
|
if neb > gen:
|
|
|
raise ValueError(f"Expected neb <= gen but got neb={neb} and gen={gen}.")
|
|
|
@@ -74,7 +75,7 @@ class ConvGAN(GanBaseClass):
|
|
|
print(self.cg.summary())
|
|
|
print('\n')
|
|
|
|
|
|
- def train(self, dataSet):
|
|
|
+ def train(self, dataSet, discTrainCount=5):
|
|
|
"""
|
|
|
Trains the GAN.
|
|
|
|
|
|
@@ -97,7 +98,7 @@ class ConvGAN(GanBaseClass):
|
|
|
self.nmbMaj = None
|
|
|
|
|
|
# Do the training.
|
|
|
- self._rough_learning(dataSet.data1, dataSet.data0)
|
|
|
+ self._rough_learning(dataSet.data1, dataSet.data0, discTrainCount)
|
|
|
|
|
|
# Neighborhood in majority class is no longer needed. So save memory.
|
|
|
self.nmbMaj = None
|
|
|
@@ -132,7 +133,7 @@ class ConvGAN(GanBaseClass):
|
|
|
|
|
|
return synth_set
|
|
|
|
|
|
- def predict(self, data):
|
|
|
+ def predictReal(self, data):
|
|
|
prediction = self.maj_min_discriminator.predict(data)
|
|
|
return np.array([x[0] for x in prediction])
|
|
|
|
|
|
@@ -203,6 +204,7 @@ class ConvGAN(GanBaseClass):
|
|
|
## passed through two dense layers
|
|
|
y = Dense(250, activation='relu')(samples)
|
|
|
y = Dense(125, activation='relu')(y)
|
|
|
+ y = Dense(75, activation='relu')(y)
|
|
|
|
|
|
## two output nodes. outputs have to be one-hot coded (see labels variable before)
|
|
|
output = Dense(2, activation='sigmoid')(y)
|
|
|
@@ -277,7 +279,7 @@ class ConvGAN(GanBaseClass):
|
|
|
|
|
|
|
|
|
# Training
|
|
|
- def _rough_learning(self, data_min, data_maj):
|
|
|
+ def _rough_learning(self, data_min, data_maj, discTrainCount):
|
|
|
generator = self.conv_sample_generator
|
|
|
discriminator = self.maj_min_discriminator
|
|
|
GAN = self.cg
|
|
|
@@ -288,6 +290,28 @@ class ConvGAN(GanBaseClass):
|
|
|
labels = tf.convert_to_tensor(create01Labels(2 * self.gen, self.gen))
|
|
|
|
|
|
for neb_epoch_count in range(self.neb_epochs):
|
|
|
+ if discTrainCount > 0:
|
|
|
+ for n in range(discTrainCount):
|
|
|
+ for min_idx in range(minSetSize):
|
|
|
+                    ## generate minority neighbourhood batch for every minority class sample by index
|
|
|
+ min_batch_indices = self.nmbMin.neighbourhoodOfItem(min_idx)
|
|
|
+ min_batch = self.nmbMin.getPointsFromIndices(min_batch_indices)
|
|
|
+ ## generate random proximal majority batch
|
|
|
+ maj_batch = self._BMB(data_maj, min_batch_indices)
|
|
|
+
|
|
|
+ ## generate synthetic samples from convex space
|
|
|
+ ## of minority neighbourhood batch using generator
|
|
|
+ conv_samples = generator.predict(min_batch)
|
|
|
+ ## concatenate them with the majority batch
|
|
|
+ concat_sample = tf.concat([conv_samples, maj_batch], axis=0)
|
|
|
+
|
|
|
+ ## switch on discriminator training
|
|
|
+ discriminator.trainable = True
|
|
|
+ ## train the discriminator with the concatenated samples and the one-hot encoded labels
|
|
|
+ discriminator.fit(x=concat_sample, y=labels, verbose=0)
|
|
|
+ ## switch off the discriminator training again
|
|
|
+ discriminator.trainable = False
|
|
|
+
|
|
|
for min_idx in range(minSetSize):
|
|
|
## generate minority neighbourhood batch for every minority class sampls by index
|
|
|
min_batch_indices = self.nmbMin.neighbourhoodOfItem(min_idx)
|