## convGAN.py

import os
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from collections import Counter
from imblearn.datasets import fetch_datasets
from sklearn.preprocessing import StandardScaler
import keras
from keras.layers import Dense, Dropout, Input
from keras.models import Model, Sequential
from tqdm import tqdm
from keras.layers import LeakyReLU
from keras.optimizers import Adam
from keras.optimizers import RMSprop
from keras import losses
from keras import backend as K
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from numpy.random import seed
seed_num = 1
seed(seed_num)
tf.random.set_seed(seed_num)
## Import dataset
data = fetch_datasets()['yeast_me2']
## Creating label and feature matrices
labels_x = data.target  ## labels of the data
print(labels_x.shape)
features_x = data.data  ## features of the data
print(features_x.shape)
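## A quick, illustrative look at the class balance (Counter is imported above);
## in imblearn's fetch_datasets the minority class is labelled 1 and the
## majority -1, so this prints the imbalance the method is meant to correct.
print(Counter(labels_x))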
# So far we have only loaded the data. Later we divide it into training and test
# sets and create separate variables for the majority and minority classes and
# their labels for both sets.
## convGAN
from IPython.display import Image
Image(filename='CoSPOV.jpg')  ## architecture diagram (renders in a notebook)
def unison_shuffled_copies(a, b, seed_perm):
    'Shuffle the feature matrix and the labels together, keeping rows paired'
    np.random.seed(seed_perm)  ## change seed 1,2,3,4,5
    assert len(a) == len(b)
    p = np.random.permutation(len(a))
    return a[p], b[p]
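## Minimal usage sketch (toy arrays, illustrative only; call manually):
def _demo_unison_shuffle():
    X_demo = np.arange(6).reshape(3, 2)
    y_demo = np.array([10, 20, 30])
    X_s, y_s = unison_shuffled_copies(X_demo, y_demo, seed_perm=0)
    print(X_s, y_s)  ## rows of X_demo and entries of y_demo move together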
def BMB(data_min, data_maj, neb, gen):
    ## Generate a borderline majority batch
    ## data_min -> minority class data
    ## data_maj -> majority class data
    ## neb -> oversampling neighbourhood
    ## gen -> convex combinations generated from each neighbourhood
    from sklearn.neighbors import NearestNeighbors
    from sklearn.utils import shuffle
    neigh = NearestNeighbors(n_neighbors=neb)
    neigh.fit(data_maj)
    bmbi = []
    for i in range(len(data_min)):
        indices = neigh.kneighbors([data_min[i]], neb, return_distance=False)
        bmbi.append(indices)
    bmbi = np.unique(np.array(bmbi).flatten())  ## indices of the majority points bordering the minority class
    bmbi = shuffle(bmbi)
    bmb = data_maj[bmbi[:gen]]  ## draw gen random borderline-majority points
    bmb = tf.convert_to_tensor(bmb)
    return bmb
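## Illustrative sketch (toy Gaussian clouds, hypothetical sizes; call manually):
## BMB returns gen majority points drawn from the border region, as a tensor.
def _demo_borderline_batch():
    rng = np.random.RandomState(0)
    toy_min = rng.normal(0, 1, size=(10, 4))   ## small minority cloud
    toy_maj = rng.normal(2, 1, size=(100, 4))  ## larger, shifted majority cloud
    print(BMB(toy_min, toy_maj, neb=3, gen=5).shape)  ## expected: (5, 4)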
def NMB_guided(data_min, neb, index):
    ## generate a minority neighbourhood batch for a particular minority sample
    ## we need this for minority data generation:
    ## we will generate synthetic samples for each training data neighbourhood
    ## index -> index of the minority sample in the training data whose neighbourhood we want to obtain
    ## data_min -> minority class data
    ## neb -> oversampling neighbourhood
    from sklearn.neighbors import NearestNeighbors
    from sklearn.utils import shuffle
    neigh = NearestNeighbors(n_neighbors=neb)
    neigh.fit(data_min)
    ind = index
    nmbi = neigh.kneighbors([data_min[ind]], neb, return_distance=False)
    nmbi = shuffle(nmbi)
    nmb = data_min[nmbi]
    nmb = tf.convert_to_tensor(nmb[0])
    return nmb
def conv_sample_gen():
    ## the generator network: generates synthetic samples from the convex space of arbitrary minority neighbourhoods
    min_neb_batch = keras.layers.Input(shape=(n_feat,))  ## takes a minority batch as input
    x = tf.reshape(min_neb_batch, (1, neb, n_feat), name=None)  ## reshape the 2-D tensor to 3-D; 1-D convolution needs the extra dimension
    x = keras.layers.Conv1D(n_feat, 3, activation='relu')(x)  ## 1-D convolution; the feature dimension stays the same
    x = keras.layers.Flatten()(x)  ## flatten after convolution
    x = keras.layers.Dense(neb*gen, activation='relu')(x)  ## dense layer to transform the vector to a convenient dimension
    x = keras.layers.Reshape((neb, gen))(x)  ## switch back to a 2-D tensor once we have the convenient shape
    s = K.sum(x, axis=1)  ## column-wise sums of the (neb, gen) coefficient matrix
    s_non_zero = tf.keras.layers.Lambda(lambda x: x + .000001)(s)  ## add a small constant so the sums are never zero (at initialization a sum can be zero)
    sinv = tf.math.reciprocal(s_non_zero)  ## reciprocals of the approximated column sums
    x = keras.layers.Multiply()([sinv, x])  ## every column of x now sums to 1
    aff = tf.transpose(x[0])  ## after transposing, each row of aff is a set of convex coefficients
    synth = tf.matmul(aff, min_neb_batch)  ## multiply the coefficients with the input minority batch: each output row is a convex combination of the input samples
    model = Model(inputs=min_neb_batch, outputs=synth)  ## compile the generator: an arbitrary minority neighbourhood batch in, a convex-space transformation with gen samples out
    opt = keras.optimizers.Adam(learning_rate=0.001)
    model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)
    return model
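## A minimal numpy check of the convex-coefficient construction above
## (illustrative only; call manually): column-normalizing a non-negative
## (neb, gen) matrix and transposing gives rows that sum to 1, so
## aff @ batch produces gen points inside the convex hull of the batch rows.
def _check_convex_coefficients():
    w = np.abs(np.random.randn(5, 5)) + 1e-6  ## stand-in for the (neb, gen) activations
    aff = (w / w.sum(axis=0)).T               ## normalize each column, then transpose
    assert np.allclose(aff.sum(axis=1), 1.0)  ## each row is now a set of convex coefficients
    batch = np.random.randn(5, 8)             ## stand-in minority batch of shape (neb, n_feat)
    return aff @ batch                        ## gen convex combinations of the batch rows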
def maj_min_disc():
    ## the discriminator is trained in two phases:
    ## first phase: while training the GAN, the discriminator learns to differentiate synthetic minority samples generated from the convex minority data space against borderline majority samples
    ## second phase: after the GAN generator has learned to create synthetic samples, it is used to generate samples that balance the dataset,
    ## and the discriminator is then retrained on the balanced dataset
    samples = keras.layers.Input(shape=(n_feat,))  ## input: synthetic samples stacked on top of a batch of borderline majority samples
    y = keras.layers.Dense(250, activation='relu')(samples)  ## passed through two dense layers
    y = keras.layers.Dense(125, activation='relu')(y)
    output = keras.layers.Dense(2, activation='sigmoid')(y)  ## two output nodes; labels must be one-hot coded (see the labels variable in rough_learning)
    model = Model(inputs=samples, outputs=output)  ## compile model
    opt = keras.optimizers.Adam(learning_rate=0.0001)
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model
def convGAN(generator, discriminator):
    ## joins the generator and the discriminator
    ## generator -> generator network instance
    ## discriminator -> discriminator network instance
    discriminator.trainable = False  ## the discriminator's trainability is switched off before the combined model is compiled,
    ## so training the GAN means training the generator network against the previously trained discriminator
    batch_data = keras.layers.Input(shape=(n_feat,))  ## input: a neighbourhood minority batch and a proximal majority batch, concatenated
    min_batch = tf.keras.layers.Lambda(lambda x: x[:neb])(batch_data)  ## extract the minority batch
    maj_batch = tf.keras.layers.Lambda(lambda x: x[neb:])(batch_data)  ## extract the majority batch
    conv_samples = generator(min_batch)  ## pass the minority batch into the generator to obtain a convex-space transformation (synthetic samples) of the input neighbourhood
    new_samples = tf.concat([conv_samples, maj_batch], axis=0)  ## concatenate the synthetic samples with the majority samples
    output = discriminator(new_samples)  ## pass the concatenated tensor into the discriminator to obtain its decisions
    ## note: inside this model the discriminator is not trained; it only makes decisions based on its previous training
    model = Model(inputs=batch_data, outputs=output)
    opt = keras.optimizers.Adam(learning_rate=0.0001)
    model.compile(loss='mse', optimizer=opt)
    return model
## this is the main training process, where the GAN learns to generate appropriate samples from the convex space
## this is the first training phase for the discriminator and the only training phase for the generator
def rough_learning(neb_epochs, data_min, data_maj, neb, gen, generator, discriminator, GAN):
    step = 1
    loss_history = []  ## stores the loss for every run
    min_idx = 0
    neb_epoch_count = 1
    labels = []
    for i in range(2*gen):
        if i < gen:
            labels.append(np.array([1, 0]))  ## first gen rows: synthetic minority
        else:
            labels.append(np.array([0, 1]))  ## last gen rows: majority
    labels = np.array(labels)
    labels = tf.convert_to_tensor(labels)
    while step < (neb_epochs*len(data_min)):
        min_batch = NMB_guided(data_min, neb, min_idx)  ## generate a minority neighbourhood batch for each minority class sample, by index
        min_idx = min_idx+1
        maj_batch = BMB(data_min, data_maj, neb, gen)  ## generate a random proximal majority batch
        conv_samples = generator.predict(min_batch)  ## generate synthetic samples from the convex space of the minority neighbourhood batch
        concat_sample = tf.concat([conv_samples, maj_batch], axis=0)  ## concatenate them with the majority batch
        discriminator.trainable = True  ## switch on discriminator training
        discriminator.fit(x=concat_sample, y=labels, verbose=0)  ## train the discriminator on the concatenated samples and the one-hot encoded labels
        discriminator.trainable = False  ## switch off discriminator training again
        gan_loss_history = GAN.fit(concat_sample, y=labels, verbose=0)  ## train the generator through the GAN against the decisions of the just-trained discriminator
        loss_history.append(gan_loss_history.history['loss'])  ## store the loss for this step
        if step % 10 == 0:
            print(str(step)+' neighbourhood batches trained; running neighbourhood epoch ' + str(neb_epoch_count))
        if min_idx == len(data_min)-1:
            print(str('Neighbourhood epoch '+ str(neb_epoch_count) +' complete'))
            neb_epoch_count = neb_epoch_count+1
            min_idx = 0
        step = step+1
    run_range = range(1, len(loss_history)+1)
    plt.rcParams["figure.figsize"] = (16, 10)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.xlabel('runs', fontsize=25)
    plt.ylabel('loss', fontsize=25)
    plt.title('Rough learning loss for discriminator', fontsize=25)
    plt.plot(run_range, loss_history)
    plt.show()
    return generator, discriminator, GAN, loss_history
def rough_learning_predictions(discriminator, test_data_numpy, test_labels_numpy):
    ## after the first training phase the discriminator can already be used for classification:
    ## it has learned to differentiate the convex minority points from the majority points
    y_pred_2d = discriminator.predict(tf.convert_to_tensor(test_data_numpy))
    ## discretisation of the labels
    y_pred = np.digitize(y_pred_2d[:, 0], [.5])
    ## the predictions show a model with good recall but low precision
    c = confusion_matrix(test_labels_numpy, y_pred)
    f = f1_score(test_labels_numpy, y_pred)
    pr = precision_score(test_labels_numpy, y_pred)
    rc = recall_score(test_labels_numpy, y_pred)
    k = cohen_kappa_score(test_labels_numpy, y_pred)
    print('Rough learning confusion matrix:', c)
    print('Rough learning f1 score', f)
    print('Rough learning precision score', pr)
    print('Rough learning recall score', rc)
    print('Rough learning kappa score', k)
    return c, f, pr, rc, k
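## How the thresholding above behaves (illustrative values; call manually):
## np.digitize with the single bin edge 0.5 maps scores below 0.5 to 0 and
## scores at or above 0.5 to 1, turning the minority-score column into hard labels.
def _demo_digitize():
    scores = np.array([0.1, 0.49, 0.5, 0.9])
    print(np.digitize(scores, [.5]))  ## -> [0 0 1 1]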
def generate_data_for_min_point(data_min, neb, index, synth_num, generator):
    ## generate synth_num synthetic points for a particular minority sample
    ## synth_num -> required number of data points to generate from a neighbourhood
    ## data_min -> minority class data
    ## neb -> oversampling neighbourhood
    ## index -> index of the minority sample in the training data whose neighbourhood we want to obtain
    runs = int(synth_num/neb)+1
    synth_set = []
    for run in range(runs):
        batch = NMB_guided(data_min, neb, index)
        synth_batch = generator.predict(batch)
        for i in range(len(synth_batch)):
            synth_set.append(synth_batch[i])
    synth_set = synth_set[:synth_num]
    synth_set = np.array(synth_set)
    return synth_set
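## Worked example of the run count above (hypothetical numbers): with
## synth_num = 31 and neb = 5, runs = int(31/5)+1 = 7, so 7*5 = 35 samples are
## generated and then trimmed back to the 31 that were requested.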
def generate_synthetic_data(data_min, data_maj, neb, generator):
    ## a rough upper bound on the synthetic samples to be generated from each neighbourhood
    synth_num = ((len(data_maj)-len(data_min))//len(data_min))+1
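    ## Worked example (hypothetical counts): with 1400 majority and 44 minority
    ## samples, synth_num = ((1400-44)//44)+1 = 31 synthetic points per
    ## neighbourhood, slightly more than the 1356 needed before trimming below.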
    ## generate synth_num synthetic samples from each minority neighbourhood
    synth_set = []
    for i in range(len(data_min)):
        synth_i = generate_data_for_min_point(data_min, neb, i, synth_num, generator)
        for k in range(len(synth_i)):
            synth_set.append(synth_i[k])
    synth_set = synth_set[:(len(data_maj)-len(data_min))]  ## keep exactly the number of synthetic samples needed to balance the two classes
    synth_set = np.array(synth_set)
    ovs_min_class = np.concatenate((data_min, synth_set), axis=0)
    ovs_training_dataset = np.concatenate((ovs_min_class, data_maj), axis=0)
    ovs_pca_labels = np.concatenate((np.zeros(len(data_min)), np.zeros(len(synth_set))+1, np.zeros(len(data_maj))+2))
    ovs_training_labels = np.concatenate((np.zeros(len(ovs_min_class))+1, np.zeros(len(data_maj))+0))
    ovs_training_labels_oh = []
    for i in range(len(ovs_training_dataset)):
        if i < len(ovs_min_class):
            ovs_training_labels_oh.append(np.array([1, 0]))
        else:
            ovs_training_labels_oh.append(np.array([0, 1]))
    ovs_training_labels_oh = np.array(ovs_training_labels_oh)
    ovs_training_labels_oh = tf.convert_to_tensor(ovs_training_labels_oh)
    ## PCA visualization of the synthetic data
    ## observe how the minority samples drawn from the convex space keep a good spread and avoid overlap with the majority
    pca = PCA(n_components=2)
    pca.fit(ovs_training_dataset)
    data_pca = pca.transform(ovs_training_dataset)
    ## plot PCA
    plt.rcParams["figure.figsize"] = (12, 12)
    colors = ['r', 'b', 'g']
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.xlabel('PCA1', fontsize=25)
    plt.ylabel('PCA2', fontsize=25)
    plt.title('PCA plot of oversampled data', fontsize=25)
    classes = ['minority', 'synthetic minority', 'majority']
    scatter = plt.scatter(data_pca[:, 0], data_pca[:, 1], c=ovs_pca_labels, cmap='Set1')
    plt.legend(handles=scatter.legend_elements()[0], labels=classes, fontsize=20)
    plt.show()
    return ovs_training_dataset, ovs_pca_labels, ovs_training_labels_oh
def final_learning(discriminator, ovs_training_dataset, ovs_training_labels_oh, test_data_numpy, test_labels_numpy, num_epochs):
    print('\n')
    print('Final round training of the discriminator as a majority-minority classifier')
    print('\n')
    ## second-phase training of the discriminator with balanced data
    history_second_learning = discriminator.fit(x=ovs_training_dataset, y=ovs_training_labels_oh, batch_size=20, epochs=num_epochs)
    ## the loss of the second-phase learning decreases smoothly,
    ## because the data is now fixed and diverse convex combinations are no longer fed into the discriminator at every training step
    run_range = range(1, num_epochs+1)
    plt.rcParams["figure.figsize"] = (16, 10)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.xlabel('runs', fontsize=25)
    plt.ylabel('loss', fontsize=25)
    plt.title('Final learning loss for discriminator', fontsize=25)
    plt.plot(run_range, history_second_learning.history['loss'])
    plt.show()
    ## after the second training phase the discriminator classifier has a more balanced performance,
    ## i.e. a better F1-score: recall decreases but precision improves
    print('\n')
    y_pred_2d = discriminator.predict(tf.convert_to_tensor(test_data_numpy))
    y_pred = np.digitize(y_pred_2d[:, 0], [.5])
    c = confusion_matrix(test_labels_numpy, y_pred)
    f = f1_score(test_labels_numpy, y_pred)
    pr = precision_score(test_labels_numpy, y_pred)
    rc = recall_score(test_labels_numpy, y_pred)
    k = cohen_kappa_score(test_labels_numpy, y_pred)
    print('Final learning confusion matrix:', c)
    print('Final learning f1 score', f)
    print('Final learning precision score', pr)
    print('Final learning recall score', rc)
    print('Final learning kappa score', k)
    return c, f, pr, rc, k
def convGAN_train_end_to_end(training_data, training_labels, test_data, test_labels, neb, gen, neb_epochs, epochs_retrain_disc):
    ## minority class
    data_min = training_data[np.where(training_labels == 1)[0]]
    ## majority class
    data_maj = training_data[np.where(training_labels == 0)[0]]
    ## instantiate the generator network and visualize its architecture
    conv_sample_generator = conv_sample_gen()
    print(conv_sample_generator.summary())
    print('\n')
    ## instantiate the discriminator network and visualize its architecture
    maj_min_discriminator = maj_min_disc()
    print(maj_min_discriminator.summary())
    print('\n')
    ## instantiate the combined GAN and visualize its architecture
    cg = convGAN(conv_sample_generator, maj_min_discriminator)
    print(cg.summary())
    print('\n')
    print('Training the GAN, first round training of the discriminator as a majority-minority classifier')
    print('\n')
    ## train the GAN generator and rough-train the discriminator
    conv_sample_generator, maj_min_discriminator_r, cg, loss_history = rough_learning(neb_epochs, data_min, data_maj, neb, gen, conv_sample_generator, maj_min_discriminator, cg)
    print('\n')
    ## rough learning results
    c_r, f_r, pr_r, rc_r, k_r = rough_learning_predictions(maj_min_discriminator_r, test_data, test_labels)
    print('\n')
    ## generate synthetic data
    ovs_training_dataset, ovs_pca_labels, ovs_training_labels_oh = generate_synthetic_data(data_min, data_maj, neb, conv_sample_generator)
    print('\n')
    ## final training results
    c, f, pr, rc, k = final_learning(maj_min_discriminator, ovs_training_dataset, ovs_training_labels_oh, test_data, test_labels, epochs_retrain_disc)
    return ((c_r, f_r, pr_r, rc_r, k_r), (c, f, pr, rc, k))
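## Minimal usage sketch (hypothetical variables train_X/train_y/test_X/test_y;
## the real call sites are inside the cross-validation loop below):
## rough_scores, final_scores = convGAN_train_end_to_end(
##     train_X, train_y, test_X, test_y,
##     neb=5, gen=5, neb_epochs=10, epochs_retrain_disc=50)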
## specify parameters
neb = gen = 5  ## neb must equal gen: the GAN slices the first neb rows of its input as the minority batch, and the generator returns gen synthetic rows in their place
neb_epochs = 10
epochs_retrain_disc = 50
n_feat = features_x.shape[1]  ## number of features
## Training
np.random.seed(42)
strata = 5
results = []
for seed_perm in range(strata):
    features_x, labels_x = unison_shuffled_copies(features_x, labels_x, seed_perm)
    #scaler = StandardScaler()
    #scaler.fit(features_x)
    #features_x=(scaler.transform(features_x))
    ### Extracting all features and labels
    print('Extracting all features and labels for seed:'+ str(seed_perm)+'\n')
    ## Dividing data into training and testing datasets for 5-fold CV
    print('Dividing data into training and testing datasets for 5-fold CV for seed:'+ str(seed_perm)+'\n')
    label_1 = np.where(labels_x == 1)[0]
    label_1 = list(label_1)
    features_1 = features_x[label_1]
    label_0 = np.where(labels_x != 1)[0]
    label_0 = list(label_0)
    features_0 = features_x[label_0]
    a = len(features_1)//5
    b = len(features_0)//5
    fold_1_min = features_1[0:a]
    fold_1_maj = features_0[0:b]
    fold_1_tst = np.concatenate((fold_1_min, fold_1_maj))
    lab_1_tst = np.concatenate((np.zeros(len(fold_1_min))+1, np.zeros(len(fold_1_maj))))
    fold_2_min = features_1[a:2*a]
    fold_2_maj = features_0[b:2*b]
    fold_2_tst = np.concatenate((fold_2_min, fold_2_maj))
    lab_2_tst = np.concatenate((np.zeros(len(fold_2_min))+1, np.zeros(len(fold_2_maj))))
    fold_3_min = features_1[2*a:3*a]
    fold_3_maj = features_0[2*b:3*b]
    fold_3_tst = np.concatenate((fold_3_min, fold_3_maj))
    lab_3_tst = np.concatenate((np.zeros(len(fold_3_min))+1, np.zeros(len(fold_3_maj))))
    fold_4_min = features_1[3*a:4*a]
    fold_4_maj = features_0[3*b:4*b]
    fold_4_tst = np.concatenate((fold_4_min, fold_4_maj))
    lab_4_tst = np.concatenate((np.zeros(len(fold_4_min))+1, np.zeros(len(fold_4_maj))))
    fold_5_min = features_1[4*a:]
    fold_5_maj = features_0[4*b:]
    fold_5_tst = np.concatenate((fold_5_min, fold_5_maj))
    lab_5_tst = np.concatenate((np.zeros(len(fold_5_min))+1, np.zeros(len(fold_5_maj))))
    fold_1_trn = np.concatenate((fold_2_min, fold_3_min, fold_4_min, fold_5_min, fold_2_maj, fold_3_maj, fold_4_maj, fold_5_maj))
    lab_1_trn = np.concatenate((np.zeros(3*a+len(fold_5_min))+1, np.zeros(3*b+len(fold_5_maj))))
    fold_2_trn = np.concatenate((fold_1_min, fold_3_min, fold_4_min, fold_5_min, fold_1_maj, fold_3_maj, fold_4_maj, fold_5_maj))
    lab_2_trn = np.concatenate((np.zeros(3*a+len(fold_5_min))+1, np.zeros(3*b+len(fold_5_maj))))
    fold_3_trn = np.concatenate((fold_2_min, fold_1_min, fold_4_min, fold_5_min, fold_2_maj, fold_1_maj, fold_4_maj, fold_5_maj))
    lab_3_trn = np.concatenate((np.zeros(3*a+len(fold_5_min))+1, np.zeros(3*b+len(fold_5_maj))))
    fold_4_trn = np.concatenate((fold_2_min, fold_3_min, fold_1_min, fold_5_min, fold_2_maj, fold_3_maj, fold_1_maj, fold_5_maj))
    lab_4_trn = np.concatenate((np.zeros(3*a+len(fold_5_min))+1, np.zeros(3*b+len(fold_5_maj))))
    fold_5_trn = np.concatenate((fold_2_min, fold_3_min, fold_4_min, fold_1_min, fold_2_maj, fold_3_maj, fold_4_maj, fold_1_maj))
    lab_5_trn = np.concatenate((np.zeros(4*a)+1, np.zeros(4*b)))
    training_folds_feats = [fold_1_trn, fold_2_trn, fold_3_trn, fold_4_trn, fold_5_trn]
    testing_folds_feats = [fold_1_tst, fold_2_tst, fold_3_tst, fold_4_tst, fold_5_tst]
    training_folds_labels = [lab_1_trn, lab_2_trn, lab_3_trn, lab_4_trn, lab_5_trn]
    testing_folds_labels = [lab_1_tst, lab_2_tst, lab_3_tst, lab_4_tst, lab_5_tst]
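    ## A compact alternative sketch (not used by this script): a similar
    ## stratified 5-fold split could be built with sklearn, e.g.
    ## from sklearn.model_selection import StratifiedKFold
    ## for trn_idx, tst_idx in StratifiedKFold(n_splits=5).split(features_x, labels_x):
    ##     trn_X, tst_X = features_x[trn_idx], features_x[tst_idx]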
    for i in range(5):
        print('\n')
        print('Executing fold: '+str(i+1))
        print('\n')
        r1, r2 = convGAN_train_end_to_end(training_folds_feats[i], training_folds_labels[i], testing_folds_feats[i], testing_folds_labels[i], neb, gen, neb_epochs, epochs_retrain_disc)
        results.append(np.array([list(r1[1:]), list(r2[1:])]))  ## drop the confusion matrices, keep (f1, precision, recall, kappa)
results = np.array(results)
## Benchmark
mean_rough = np.mean(results[:, 0], axis=0)
data_r = {'F1-Score_r': [mean_rough[0]], 'Precision_r': [mean_rough[1]], 'Recall_r': [mean_rough[2]], 'Kappa_r': [mean_rough[3]]}
df_r = pd.DataFrame(data=data_r)
print('Rough training results:')
print('\n')
print(df_r)
mean_final = np.mean(results[:, 1], axis=0)
data_f = {'F1-Score_f': [mean_final[0]], 'Precision_f': [mean_final[1]], 'Recall_f': [mean_final[2]], 'Kappa_f': [mean_final[3]]}
df_f = pd.DataFrame(data=data_f)
print('Final training results:')
print('\n')
print(df_f)