convGAN.py

import numpy as np
from numpy.random import seed
import pandas as pd
import matplotlib.pyplot as plt
from library.interfaces import GanBaseClass
from library.dataset import DataSet
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import shuffle
from imblearn.datasets import fetch_datasets
from keras.layers import Dense, Input, Multiply, Flatten, Conv1D, Reshape
from keras.models import Model
from keras import backend as K
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Lambda
from library.NNSearch import NNSearch
import warnings

warnings.filterwarnings("ignore")

def repeat(x, times):
    return [x for _i in range(times)]

def create01Labels(totalSize, sizeFirstHalf):
    labels = repeat(np.array([1, 0]), sizeFirstHalf)
    labels.extend(repeat(np.array([0, 1]), totalSize - sizeFirstHalf))
    return np.array(labels)

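# Worked example (illustration only): create01Labels(4, 2) yields
#   [[1, 0], [1, 0], [0, 1], [0, 1]]
# i.e. the first sizeFirstHalf rows label the synthetic/minority block and
# the remaining rows label the majority block (see _rough_learning below).
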
class ConvGAN(GanBaseClass):
  33. """
  34. This is a toy example of a GAN.
  35. It repeats the first point of the training-data-set.
  36. """
    def __init__(self, n_feat, neb=5, gen=5, neb_epochs=10, debug=True):
        self.isTrained = False
        self.n_feat = n_feat  ## number of features per sample
        self.neb = neb        ## size of a minority neighbourhood batch
        self.gen = gen        ## convex combinations generated per neighbourhood
        self.neb_epochs = neb_epochs
        self.loss_history = None
        self.debug = debug
        self.dataSet = None
        self.conv_sample_generator = None
        self.maj_min_discriminator = None
        self.cg = None
        if neb > gen:
            raise ValueError(f"Expected neb <= gen but got neb={neb} and gen={gen}.")

    def reset(self):
        """
        Resets the trained GAN to a random state.
        """
        self.isTrained = False
        ## instantiate the generator network
        self.conv_sample_generator = self._conv_sample_gen()
        ## instantiate the discriminator network
        self.maj_min_discriminator = self._maj_min_disc()
        ## instantiate the combined GAN network
        self.cg = self._convGAN(self.conv_sample_generator, self.maj_min_discriminator)
        if self.debug:
            ## visualize the three architectures
            print(self.conv_sample_generator.summary())
            print('\n')
            print(self.maj_min_discriminator.summary())
            print('\n')
            print(self.cg.summary())
            print('\n')

    def train(self, dataSet):
        """
        Trains the GAN and marks it as trained.
        *dataSet* is an instance of /library.dataset.DataSet/ containing the
        training data; the minority class is *data1*, the majority class *data0*.
        """
        if dataSet.data1.shape[0] <= 0:
            raise AttributeError("Train: Expected data class 1 to contain at least one point.")
        self.dataSet = dataSet
        self.nmb = self._NMB_prepare(dataSet.data1)
        self._rough_learning(dataSet.data1, dataSet.data0)
        self.isTrained = True

    def generateDataPoint(self):
        """
        Returns one synthetic data point.
        """
        return (self.generateData(1))[0]

    def generateData(self, numOfSamples=1):
        """
        Generates a list of synthetic data points.
        *numOfSamples* is an integer > 0; it gives the number of samples to generate.
        """
        if not self.isTrained:
            raise ValueError("Tried to generate data with an untrained GAN.")
        data_min = self.dataSet.data1
        ## roughly calculate the upper bound of the synthetic samples to be
        ## generated from each neighbourhood
        synth_num = (numOfSamples // len(data_min)) + 1
        ## generate synth_num synthetic samples from each minority neighbourhood
        synth_set = []
        for i in range(len(data_min)):
            synth_set.extend(self._generate_data_for_min_point(i, synth_num))
        ## extract the exact number of synthetic samples needed to exactly
        ## balance the two classes
        synth_set = np.array(synth_set[:numOfSamples])
        return synth_set

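    # Worked example (illustration only): numOfSamples=7 with 3 minority
    # points gives synth_num = 7 // 3 + 1 = 3, so 3 candidates are drawn from
    # each of the 3 neighbourhoods and the first 7 of the 9 are kept.
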
    # ###############################################################
    # Hidden internal functions
    # ###############################################################

    # Creating the GAN
    def _conv_sample_gen(self):
        """
        The generator network: generates synthetic samples from the convex
        space of arbitrary minority neighbourhoods.
        """
        ## takes a minority neighbourhood batch as input
        min_neb_batch = Input(shape=(self.n_feat,))
        ## reshape the 2-D tensor to 3-D, otherwise 1-D convolution won't work
        x = tf.reshape(min_neb_batch, (1, self.neb, self.n_feat), name=None)
        ## apply 1-D convolution; the feature dimension stays the same
        x = Conv1D(self.n_feat, 3, activation='relu')(x)
        ## flatten after convolution
        x = Flatten()(x)
        ## dense layer to transform the vector to a convenient dimension;
        ## the relu keeps all entries non-negative, which the convex
        ## coefficients below require
        x = Dense(self.neb * self.gen, activation='relu')(x)
        ## switch back to a 2-D tensor once we have the convenient shape
        x = Reshape((self.neb, self.gen))(x)
        ## sum over the neighbourhood axis: one sum per coefficient column
        s = K.sum(x, axis=1)
        ## add a small constant so the sums are never zero
        ## (right after initialization a sum can be zero)
        s_non_zero = Lambda(lambda x: x + .000001)(s)
        ## reciprocals of the approximated column sums
        sinv = tf.math.reciprocal(s_non_zero)
        ## at this step we ensure that every column of x sums to 1;
        ## that means each column is a set of convex coefficients
        x = Multiply()([sinv, x])
        ## transpose the matrix, so each row is now a set of convex coefficients
        aff = tf.transpose(x[0])
        ## matrix multiplication of the convex coefficients with the original
        ## minority batch taken as input; this generates a convex
        ## transformation of the input minority batch
        synth = tf.matmul(aff, min_neb_batch)
        ## finally, compile the generator with an arbitrary minority
        ## neighbourhood batch as input and a convex space transformation of
        ## the same number of samples as output
        model = Model(inputs=min_neb_batch, outputs=synth)
        opt = Adam(learning_rate=0.001)
        model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)
        return model

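    # The normalization above, sketched in NumPy (illustration only):
    #   w = np.random.rand(neb, gen)             ## non-negative entries
    #   coeffs = (w / (w.sum(axis=0) + 1e-6)).T  ## (gen, neb); rows sum to ~1
    #   synth = coeffs @ min_batch               ## gen convex combinations
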
    def _maj_min_disc(self):
        """
        The discriminator is trained in two phases:
        first phase: while training the GAN, the discriminator learns to
            differentiate synthetic minority samples generated from the
            convex minority data space against the borderline majority samples
        second phase: after the GAN generator has learned to create synthetic
            samples, it can be used to generate synthetic samples to balance
            the dataset, and the discriminator is then retrained on the
            balanced dataset
        """
        ## takes as input synthetic samples stacked upon a batch of
        ## borderline majority samples
        samples = Input(shape=(self.n_feat,))
        ## passed through two dense layers
        y = Dense(250, activation='relu')(samples)
        y = Dense(125, activation='relu')(y)
        ## two output nodes; outputs are one-hot coded (see the labels variable)
        output = Dense(2, activation='sigmoid')(y)
        ## compile model
        model = Model(inputs=samples, outputs=output)
        opt = Adam(learning_rate=0.0001)
        model.compile(loss='binary_crossentropy', optimizer=opt)
        return model

    def _convGAN(self, generator, discriminator):
        """
        Joins the generator and the discriminator.
        conv_coeff_generator -> generator network instance
        maj_min_discriminator -> discriminator network instance
        """
        ## by default the discriminator trainability is switched off;
        ## training the GAN thus means training the generator network against
        ## the previously trained discriminator network
        discriminator.trainable = False
        ## input receives a neighbourhood minority batch
        ## and a proximal majority batch, concatenated
        batch_data = Input(shape=(self.n_feat,))
        ## extract the minority batch
        min_batch = Lambda(lambda x: x[:self.neb])(batch_data)
        ## extract the majority batch
        maj_batch = Lambda(lambda x: x[self.gen:])(batch_data)
        ## pass the minority batch into the generator to obtain a convex space
        ## transformation (synthetic samples) of the minority neighbourhood
        conv_samples = generator(min_batch)
        ## concatenate the synthetic samples with the majority samples
        new_samples = tf.concat([conv_samples, maj_batch], axis=0)
        ## pass the concatenated batch into the discriminator for its decisions
        output = discriminator(new_samples)
        ## note that the discriminator is not trained here but makes decisions
        ## based on its previous training
        model = Model(inputs=batch_data, outputs=output)
        opt = Adam(learning_rate=0.0001)
        model.compile(loss='mse', optimizer=opt)
        return model

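    # Batch layout, as wired above (illustration only): the input holds
    # 2 * gen rows; rows [0, neb) feed the generator as a minority batch,
    # rows [gen, 2 * gen) form the majority batch, and because neb <= gen the
    # rows in between are unused.
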
    # Create synthetic points
    def _generate_data_for_min_point(self, index, synth_num):
        """
        Generates synth_num synthetic points for a particular minority sample.
        synth_num -> required number of data points to generate from a neighbourhood
        index -> index of the minority sample in the training data whose
                 neighbourhood we want to use
        """
        runs = int(synth_num / self.neb) + 1
        synth_set = []
        for _run in range(runs):
            batch = self._NMB_guided(index)
            synth_batch = self.conv_sample_generator.predict(batch)
            synth_set.extend(synth_batch)
        return synth_set[:synth_num]

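    # Note (illustration only): each run yields self.gen synthetic rows from
    # the generator, and since neb <= gen, runs = synth_num // neb + 1 always
    # produces at least synth_num candidates before truncation.
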
    # Training
    def _rough_learning(self, data_min, data_maj):
        generator = self.conv_sample_generator
        discriminator = self.maj_min_discriminator
        GAN = self.cg
        loss_history = []  ## stores the loss for every run
        min_idx = 0
        neb_epoch_count = 1
        labels = tf.convert_to_tensor(create01Labels(2 * self.gen, self.gen))
        for step in range(self.neb_epochs * len(data_min)):
            ## generate a minority neighbourhood batch for every minority class sample by index
            min_batch = self._NMB_guided(min_idx)
            min_idx = min_idx + 1
            ## generate a random proximal majority batch
            maj_batch = self._BMB(data_min, data_maj)
            ## generate synthetic samples from the convex space
            ## of the minority neighbourhood batch using the generator
            conv_samples = generator.predict(min_batch)
            ## concatenate them with the majority batch
            concat_sample = tf.concat([conv_samples, maj_batch], axis=0)
            ## switch on discriminator training
            discriminator.trainable = True
            ## train the discriminator with the concatenated samples and the one-hot encoded labels
            discriminator.fit(x=concat_sample, y=labels, verbose=0)
            ## switch off discriminator training again
            discriminator.trainable = False
            ## use the GAN to make the generator learn from the decisions
            ## made by the previous discriminator training
            gan_loss_history = GAN.fit(concat_sample, y=labels, verbose=0)
            ## store the loss for this step
            loss_history.append(gan_loss_history.history['loss'])
            if self.debug and ((step + 1) % 10 == 0):
                print(f"{step + 1} neighbourhood batches trained; running neighbourhood epoch {neb_epoch_count}")
            if min_idx == len(data_min) - 1:
                if self.debug:
                    print(f"Neighbourhood epoch {neb_epoch_count} complete")
                neb_epoch_count = neb_epoch_count + 1
                min_idx = 0
        if self.debug:
            run_range = range(1, len(loss_history) + 1)
            plt.rcParams["figure.figsize"] = (16, 10)
            plt.xticks(fontsize=20)
            plt.yticks(fontsize=20)
            plt.xlabel('runs', fontsize=25)
            plt.ylabel('loss', fontsize=25)
            plt.title('Rough learning loss for discriminator', fontsize=25)
            plt.plot(run_range, loss_history)
            plt.show()
        self.conv_sample_generator = generator
        self.maj_min_discriminator = discriminator
        self.cg = GAN
        self.loss_history = loss_history

    ## convGAN
    def _BMB(self, data_min, data_maj):
        ## Generate a borderline majority batch of self.gen random points.
        ## data_min -> minority class data (unused here)
        ## data_maj -> majority class data
        return tf.convert_to_tensor(
            data_maj[np.random.randint(len(data_maj), size=self.gen)]
        )

    def _NMB_prepare(self, data_min):
        ## fit a nearest-neighbour search structure over the minority data
        neigh = NNSearch(self.neb)
        neigh.fit(data_min)
        return (data_min, neigh)

    def _NMB_guided(self, index):
        ## Generate a minority neighbourhood batch for a particular minority
        ## sample; we need this for minority data generation, since synthetic
        ## samples are generated for each training data neighbourhood.
        ## index -> index of the minority sample in the training data whose
        ##          neighbourhood we want to obtain
        (data_min, neigh) = self.nmb
        nmbi = np.array([neigh.neighbourhoodOfItem(index)])
        nmbi = shuffle(nmbi)
        nmb = data_min[nmbi]
        nmb = tf.convert_to_tensor(nmb[0])
        return nmb
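
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only). ConvGAN only reads the attributes
# data0 (majority) and data1 (minority) from the dataset object, so a
# SimpleNamespace stands in for library.dataset.DataSet here; the toy arrays
# below are made up.
if __name__ == "__main__":
    from types import SimpleNamespace

    rng = np.random.default_rng(0)
    toy = SimpleNamespace(
        data1=rng.normal(0.0, 1.0, size=(30, 8)),   ## minority class
        data0=rng.normal(2.0, 1.0, size=(120, 8)),  ## majority class
    )
    gan = ConvGAN(n_feat=8, neb=5, gen=5, neb_epochs=2, debug=False)
    gan.reset()
    gan.train(toy)
    ## generate enough synthetic minority samples to balance the classes
    synth = gan.generateData(len(toy.data0) - len(toy.data1))
    print(synth.shape)  ## expected: (90, 8)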