Procházet zdrojové kódy

Added noise layer to XConvGeN and split projection.

Kristian Schultz před 3 roky
rodič
revize
7b9c7c0d00
3 změnil soubory, kde provedl 227 přidání a 50 odebrání
  1. 164 20
      XConvGeN-Example.ipynb
  2. 4 1
      library/generators/SimpleGan.py
  3. 59 29
      library/generators/XConvGeN.py

Rozdílová data souboru nebyla zobrazena, protože soubor je příliš velký
+ 164 - 20
XConvGeN-Example.ipynb


+ 4 - 1
library/generators/SimpleGan.py

@@ -14,7 +14,10 @@ from library.interfaces import GanBaseClass
 
 from keras.layers import Dense, Dropout, Input
 from keras.models import Model, Sequential
-from keras.layers.advanced_activations import LeakyReLU
+try:
+    from keras.layers.advanced_activations import LeakyReLU
+except:
+    from keras.layers import LeakyReLU
 from tensorflow.keras.optimizers import Adam
 import tensorflow as tf
 

+ 59 - 29
library/generators/XConvGeN.py

@@ -5,14 +5,15 @@ from library.interfaces import GanBaseClass
 from library.dataset import DataSet
 from library.timing import timing
 
-from keras.layers import Dense, Input, Multiply, Flatten, Conv1D, Reshape
-from keras.models import Model
+from keras.layers import Dense, Input, Multiply, Flatten, Conv1D, Reshape, InputLayer, Add
+from keras.models import Model, Sequential
 from keras import backend as K
-from tqdm import tqdm
+#from tqdm import tqdm
 
 import tensorflow as tf
 from tensorflow.keras.optimizers import Adam
 from tensorflow.keras.layers import Lambda
+import tensorflow_probability as tfp
 
 from sklearn.utils import shuffle
 
@@ -200,40 +201,69 @@ class XConvGeN(GanBaseClass):
     # ###############################################################
 
     # Creating the Network: Generator
-    def _conv_sample_gen(self):
+    def _conv_sample_gen(self, layerSize=None):
         """
         The generator network to generate synthetic samples from the convex space
         of arbitrary minority neighbourhoods
         """
 
+        if layerSize is None:
+            layerSize = (self.gen // 2) + 1
+
         ## takes minority batch as input
         min_neb_batch = Input(shape=(self.neb, self.n_feat,))
 
         ## using 1-D convolution, feature dimension remains the same
-        x = Conv1D(self.n_feat, 3, activation='relu')(min_neb_batch)
+        x = Conv1D(self.n_feat, 3, activation='relu', name="UnsharpenInput")(min_neb_batch)
         ## flatten after convolution
-        x = Flatten()(x)
-        ## add dense layer to transform the vector to a convenient dimension
-        x = Dense(self.neb * self.gen, activation='relu')(x)
-
-        ## again, witching to 2-D tensor once we have the convenient shape
-        x = Reshape((self.neb, self.gen))(x)
-        ## column wise sum
-        s = K.sum(x, axis=1)
-        ## adding a small constant to always ensure the column sums are non zero.
-        ## if this is not done then during initialization the sum can be zero.
-        s_non_zero = Lambda(lambda x: x + .000001)(s)
-        ## reprocals of the approximated column sum
-        sinv = tf.math.reciprocal(s_non_zero)
-        ## At this step we ensure that column sum is 1 for every row in x.
-        ## That means, each column is set of convex co-efficient
-        x = Multiply()([sinv, x])
-        ## Now we transpose the matrix. So each row is now a set of convex coefficients
-        aff=tf.transpose(x[0])
-        ## We now do matrix multiplication of the affine combinations with the original
-        ## minority batch taken as input. This generates a convex transformation
-        ## of the input minority batch
-        synth=tf.matmul(aff, min_neb_batch)
+        x = Flatten(name="InputMatrixToVector")(x)
+
+        synth = []
+        n = 0
+        while n < self.gen:
+            w = min(layerSize, self.gen - n)
+            if w <= 0:
+                break
+            n += w
+    
+            ## add dense layer to transform the vector to a convenient dimension
+            y = Dense(self.neb * w, activation='relu', name=f"P{n}_dense")(x)
+
+            ## again, switching to 2-D tensor once we have the convenient shape
+            y = Reshape((self.neb, w), name=f"P{n}_reshape")(y)
+            ## column wise sum
+            s = K.sum(y, axis=1)
+            ## adding a small constant to always ensure the column sums are non zero.
+            ## if this is not done then during initialization the sum can be zero.
+            s_non_zero = Lambda(lambda x: x + .000001, name=f"P{n}_make_non_zero")(s)
+            ## reciprocals of the approximated column sum
+            sinv = tf.math.reciprocal(s_non_zero, name=f"P{n}_invert")
+            ## At this step we ensure that column sum is 1 for every row in x.
+            ## That means, each column is a set of convex coefficients
+            y = Multiply(name=f"P{n}_normalize")([sinv, y])
+            ## Now we transpose the matrix. So each row is now a set of convex coefficients
+            aff = tf.transpose(y[0], name=f"P{n}_transpose")
+            ## We now do matrix multiplication of the affine combinations with the original
+            ## minority batch taken as input. This generates a convex transformation
+            ## of the input minority batch
+            y = tf.matmul(aff, min_neb_batch, name=f"P{n}_project")
+            synth.append(y)
+
+        synth = tf.concat(synth, axis=1, name="collect_planes")
+
+        nOut = self.gen * self.n_feat
+
+        noiseGenerator = Sequential([
+          InputLayer(input_shape=(self.gen, self.n_feat)),
+          Flatten(),
+          Dense(tfp.layers.IndependentNormal.params_size(nOut)),
+          tfp.layers.IndependentNormal(nOut)
+        ], name="RandomNoise")
+
+        noise = noiseGenerator(synth)
+        noise = Reshape((self.gen, self.n_feat), name="ReshapeNoise")(noise)
+        synth = Add(name="AddNoise")([synth, noise])
+
         ## finally we compile the generator with an arbitrary minority neighbourhood batch
         ## as input and a convex space transformation of the same number of samples as output
         model = Model(inputs=min_neb_batch, outputs=synth)
@@ -314,7 +344,7 @@ class XConvGeN(GanBaseClass):
         # conv_batch: (batchSize * gen, 2)
 
         maj_batch = discriminator(maj_batch)
-        maj_batch = tf.reshape(maj_batch, (-1, self.gen, 2), name="ReshapeGenDiscOutput")
+        maj_batch = tf.reshape(maj_batch, (-1, self.gen, 2), name="ReshapeMajDiscOutput")
         # conv_batch: (batchSize * gen, 2)
         
         ## concatenate the decisions
@@ -359,7 +389,7 @@ class XConvGeN(GanBaseClass):
             self.timing["GenSamples"].start()
             ## generate synthetic samples from convex space
             ## of minority neighbourhood batch using generator
-            conv_samples = generator.predict(np.array([min_batch]), batch_size=self.neb)
+            conv_samples = generator.predict(np.array([min_batch]), batch_size=self.neb, verbose=0)
             conv_samples = tf.reshape(conv_samples, shape=(self.gen, self.n_feat))
             self.timing["GenSamples"].stop()
 

Některé soubory nejsou zobrazeny, neboť je v těchto rozdílových datech změněno mnoho souborů