Kristian Schultz 4 года назад
Родитель
Commit
ef074b07b9
1 изменённый файл: 4 добавлено и 8 удалено
  1. 4 8
      library/convGAN.py

+ 4 - 8
library/convGAN.py

@@ -22,8 +22,7 @@ from keras.layers import Dense, Dropout, Input
 from keras.models import Model,Sequential
 from keras.models import Model,Sequential
 from tqdm import tqdm
 from tqdm import tqdm
 from keras.layers.advanced_activations import LeakyReLU
 from keras.layers.advanced_activations import LeakyReLU
-from keras.optimizers import Adam
-from keras.optimizers import RMSprop
+from tensorflow.keras.optimizers import Adam
 from keras import losses
 from keras import losses
 from keras import backend as K
 from keras import backend as K
 import tensorflow as tf
 import tensorflow as tf
@@ -53,9 +52,6 @@ features_x.shape
 # Until now we have obtained the data. We divided it into training and test sets. We obtained separate variables for the majority and minority classes and their labels for both sets.
 # Until now we have obtained the data. We divided it into training and test sets. We obtained separate variables for the majority and minority classes and their labels for both sets.
 
 
 ## convGAN
 ## convGAN
-from IPython.display import Image
-Image(filename='CoSPOV.jpg')
-
 def unison_shuffled_copies(a, b,seed_perm):
 def unison_shuffled_copies(a, b,seed_perm):
     'Shuffling the feature matrix along with the labels with same order'
     'Shuffling the feature matrix along with the labels with same order'
     np.random.seed(seed_perm)##change seed 1,2,3,4,5
     np.random.seed(seed_perm)##change seed 1,2,3,4,5
@@ -125,7 +121,7 @@ def conv_sample_gen():
     aff=tf.transpose(x[0]) ## Now we transpose the matrix. So each column is now a set of convex coefficients
     aff=tf.transpose(x[0]) ## Now we transpose the matrix. So each column is now a set of convex coefficients
     synth=tf.matmul(aff,min_neb_batch) ## We now do matrix multiplication of the affine combinations with the original minority batch taken as input. This generates a convex transformation of the input minority batch
     synth=tf.matmul(aff,min_neb_batch) ## We now do matrix multiplication of the affine combinations with the original minority batch taken as input. This generates a convex transformation of the input minority batch
     model = Model(inputs=min_neb_batch, outputs=synth) ## finally we compile the generator with an arbitrary minortiy neighbourhood batch as input and a covex space transformation of the same number of samples as output
     model = Model(inputs=min_neb_batch, outputs=synth) ## finally we compile the generator with an arbitrary minortiy neighbourhood batch as input and a covex space transformation of the same number of samples as output
-    opt = keras.optimizers.Adam(learning_rate=0.001)
+    opt = Adam(learning_rate=0.001)
     model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)
     model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)
     return model
     return model
 
 
@@ -141,7 +137,7 @@ def maj_min_disc():
     y= keras.layers.Dense(125, activation='relu')(y)
     y= keras.layers.Dense(125, activation='relu')(y)
     output= keras.layers.Dense(2, activation='sigmoid')(y) ## two output nodes. outputs have to be one-hot coded (see labels variable before)
     output= keras.layers.Dense(2, activation='sigmoid')(y) ## two output nodes. outputs have to be one-hot coded (see labels variable before)
     model = Model(inputs=samples, outputs=output) ## compile model
     model = Model(inputs=samples, outputs=output) ## compile model
-    opt = keras.optimizers.Adam(learning_rate=0.0001)
+    opt = Adam(learning_rate=0.0001)
     model.compile(loss='binary_crossentropy', optimizer=opt)
     model.compile(loss='binary_crossentropy', optimizer=opt)
     return model
     return model
 
 
@@ -162,7 +158,7 @@ def convGAN(generator,discriminator):
     output=discriminator(new_samples) ## pass the concatenated vector into the discriminator to know its decisions
     output=discriminator(new_samples) ## pass the concatenated vector into the discriminator to know its decisions
     ## note that, the discriminator will not be trained but will make decisions based on its previous training while using this function
     ## note that, the discriminator will not be trained but will make decisions based on its previous training while using this function
     model = Model(inputs=batch_data, outputs=output)
     model = Model(inputs=batch_data, outputs=output)
-    opt = keras.optimizers.Adam(learning_rate=0.0001)
+    opt = Adam(learning_rate=0.0001)
     model.compile(loss='mse', optimizer=opt)
     model.compile(loss='mse', optimizer=opt)
     return model
     return model