Browse Source

added Examples provided by Saptarshi and Cagri

Kristian Schultz 4 năm trước cách đây
mục cha
commit
fe4acb9663

Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 619 - 0
initialExample/convGAN v0.2.ipynb


+ 782 - 0
initialExample/convGAN v0.3.ipynb

@@ -0,0 +1,782 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Importing libraries"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import math\n",
+    "import random  ## duplicate 'import random' removed\n",
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "import matplotlib.pyplot as plt\n",
+    "from scipy import ndarray\n",
+    "from sklearn.neighbors import NearestNeighbors\n",
+    "from sklearn.decomposition import PCA\n",
+    "from sklearn.metrics import confusion_matrix\n",
+    "from sklearn.metrics import f1_score\n",
+    "from sklearn.metrics import cohen_kappa_score\n",
+    "from sklearn.metrics import precision_score\n",
+    "from sklearn.metrics import recall_score\n",
+    "from collections import Counter\n",
+    "from imblearn.datasets import fetch_datasets\n",
+    "from sklearn.preprocessing import StandardScaler"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import keras\n",
+    "from keras.layers import Dense, Dropout, Input\n",
+    "from keras.models import Model,Sequential\n",
+    "from tqdm import tqdm\n",
+    "## keras.layers.advanced_activations is a deprecated/removed module path;\n",
+    "## LeakyReLU is importable directly from keras.layers across Keras 2.x.\n",
+    "from keras.layers import LeakyReLU\n",
+    "from keras.optimizers import Adam\n",
+    "from keras.optimizers import RMSprop\n",
+    "from keras import losses\n",
+    "from keras import backend as K\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import warnings\n",
+    "warnings.filterwarnings(\"ignore\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sklearn.neighbors import KNeighborsClassifier\n",
+    "from sklearn.ensemble import RandomForestClassifier\n",
+    "from sklearn.ensemble import GradientBoostingClassifier"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from numpy.random import seed\n",
+    "seed_num=1\n",
+    "seed(seed_num)\n",
+    "tf.random.set_seed(seed_num) "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Import dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data = fetch_datasets()['yeast_me2']"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Creating label and feature matrices"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "labels_x=data.target ## labels of the data\n",
+    "labels_x.shape"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "features_x=data.data ## features of the data\n",
+    "features_x.shape"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Until now we have obtained the data. We divided it into training and test sets, and we obtained separate variables for the majority and minority classes and their labels for both sets."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# convGAN"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from IPython.display import Image\n",
+    "Image(filename='CoSPOV.jpg')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def unison_shuffled_copies(a, b,seed_perm):\n",
+    "    'Apply one and the same random permutation to the feature matrix and its labels'\n",
+    "    np.random.seed(seed_perm)  ## stratum seed, varied by the caller over 0..strata-1\n",
+    "    assert len(a) == len(b)\n",
+    "    perm = np.random.permutation(len(a))\n",
+    "    return a[perm], b[perm]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def BMB(data_min,data_maj, neb, gen):\n",
+    "    \n",
+    "    ## Generate a borderline majority batch\n",
+    "    ## data_min -> minority class data\n",
+    "    ## data_maj -> majority class data\n",
+    "    ## neb -> oversampling neighbourhood\n",
+    "    ## gen -> number of borderline majority samples drawn\n",
+    "    \n",
+    "    from sklearn.neighbors import NearestNeighbors\n",
+    "    from sklearn.utils import shuffle\n",
+    "    neigh = NearestNeighbors(n_neighbors=neb)  ## keyword arg: the positional form is removed in sklearn >= 1.0\n",
+    "    neigh.fit(data_maj)\n",
+    "    ## indices of majority samples lying in the neighbourhood of any minority sample (the 'borderline' set);\n",
+    "    ## one vectorised kneighbors call replaces the original per-sample loop\n",
+    "    indices=neigh.kneighbors(data_min,neb,return_distance=False)\n",
+    "    bmbi=np.unique(indices.flatten())\n",
+    "    bmbi=shuffle(bmbi)\n",
+    "    ## BUG FIX: the original sampled uniformly from ALL of data_maj and never used bmbi,\n",
+    "    ## so the batch was not borderline at all; draw from the borderline index set instead.\n",
+    "    bmb=data_maj[np.random.choice(bmbi,size=gen)]\n",
+    "    bmb=tf.convert_to_tensor(bmb)\n",
+    "    return bmb"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def NMB_guided(data_min, neb, index):\n",
+    "    \n",
+    "    ## generate a minority neighbourhood batch for a particular minority sample\n",
+    "    ## we need this for minority data generation\n",
+    "    ## we will generate synthetic samples for each training data neighbourhood\n",
+    "    ## index -> index of the minority sample in a training data whose neighbourhood we want to obtain\n",
+    "    ## data_min -> minority class data\n",
+    "    ## neb -> oversampling neighbourhood\n",
+    "    \n",
+    "    from sklearn.neighbors import NearestNeighbors\n",
+    "    from sklearn.utils import shuffle\n",
+    "    neigh = NearestNeighbors(n_neighbors=neb)  ## keyword arg: the positional form is removed in sklearn >= 1.0\n",
+    "    neigh.fit(data_min)\n",
+    "    nmbi=neigh.kneighbors([data_min[index]],neb,return_distance=False)[0]\n",
+    "    ## BUG FIX: the original shuffled the (1, neb) index array along axis 0, a no-op;\n",
+    "    ## flattening first (via [0] above) makes the neighbour order actually random.\n",
+    "    nmbi=shuffle(nmbi)\n",
+    "    nmb=tf.convert_to_tensor(data_min[nmbi])\n",
+    "    return (nmb)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def conv_sample_gen():\n",
+    "    \n",
+    "    ## the generator network to generate synthetic samples from the convex space of arbitrary minority neighbourhoods\n",
+    "    ## NOTE: relies on the notebook-level globals n_feat, neb and gen\n",
+    "    \n",
+    "    min_neb_batch = keras.layers.Input(shape=(n_feat,)) ## takes minority batch as input\n",
+    "    x=tf.reshape(min_neb_batch, (1,neb,n_feat), name=None) ## reshaping the 2D tensor to 3D for using 1-D convolution, otherwise 1-D convolution won't work.\n",
+    "    x= keras.layers.Conv1D(n_feat, 3, activation='relu')(x) ## using 1-D convolution, feature dimension remains the same\n",
+    "    x= keras.layers.Flatten()(x) ## flatten after convolution\n",
+    "    x= keras.layers.Dense(neb*gen, activation='relu')(x) ## add dense layer to transform the vector to a convenient dimension\n",
+    "    x= keras.layers.Reshape((neb,gen))(x)## again, switching to 2-D tensor once we have the convenient shape\n",
+    "    s=K.sum(x,axis=1) ## row wise sum\n",
+    "    s_non_zero=tf.keras.layers.Lambda(lambda x: x+.000001)(s) ## adding a small constant to always ensure the row sums are non zero. if this is not done then during initialization the sum can be zero\n",
+    "    sinv=tf.math.reciprocal(s_non_zero) ## reciprocals of the approximated row sum\n",
+    "    x=keras.layers.Multiply()([sinv,x]) ## At this step we ensure that row sum is 1 for every row in x. That means, each row is a set of convex coefficients\n",
+    "    aff=tf.transpose(x[0]) ## Now we transpose the matrix. So each column is now a set of convex coefficients\n",
+    "    synth=tf.matmul(aff,min_neb_batch) ## We now do matrix multiplication of the affine combinations with the original minority batch taken as input. This generates a convex transformation of the input minority batch\n",
+    "    model = Model(inputs=min_neb_batch, outputs=synth) ## finally we compile the generator with an arbitrary minority neighbourhood batch as input and a convex space transformation of the same number of samples as output\n",
+    "    opt = keras.optimizers.Adam(learning_rate=0.001)\n",
+    "    model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)\n",
+    "    return model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def maj_min_disc():\n",
+    "    \n",
+    "    ## the discriminator is trained in two phases:  \n",
+    "    ## first phase: while training GAN the discriminator learns to differentiate synthetic minority samples generated from convex minority data space against the borderline majority samples\n",
+    "    ## second phase: after the GAN generator learns to create synthetic samples, it can be used to generate synthetic samples to balance the dataset\n",
+    "    ## and then retrain the discriminator with the balanced dataset\n",
+    "    \n",
+    "    samples=keras.layers.Input(shape=(n_feat,)) ## takes as input synthetic sample generated as input stacked upon a batch of borderline majority samples \n",
+    "    y= keras.layers.Dense(250, activation='relu')(samples) ## passed through two dense layers \n",
+    "    y= keras.layers.Dense(125, activation='relu')(y)\n",
+    "    ## NOTE(review): two one-hot outputs with 'sigmoid' + binary_crossentropy — 'softmax' with\n",
+    "    ## categorical_crossentropy is the conventional pairing for one-hot targets; kept as-is to preserve behaviour.\n",
+    "    output= keras.layers.Dense(2, activation='sigmoid')(y) ## two output nodes. outputs have to be one-hot coded (see labels variable before)\n",
+    "    model = Model(inputs=samples, outputs=output) ## compile model\n",
+    "    opt = keras.optimizers.Adam(learning_rate=0.0001)\n",
+    "    model.compile(loss='binary_crossentropy', optimizer=opt)\n",
+    "    return model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def convGAN(generator,discriminator):\n",
+    "    \n",
+    "    ## for joining the generator and the discriminator\n",
+    "    ## generator -> generator network instance\n",
+    "    ## discriminator -> discriminator network instance\n",
+    "    \n",
+    "    ## BUG FIX: the original wrote maj_min_disc.trainable=False, which set an attribute on the\n",
+    "    ## maj_min_disc *function* object, not on the discriminator model passed in — so the\n",
+    "    ## discriminator was never actually frozen inside the combined model.\n",
+    "    discriminator.trainable=False ## by default the discriminator trainability is switched off. \n",
+    "    ## Thus training the GAN means training the generator network as per previously trained discriminator network.\n",
+    "    batch_data = keras.layers.Input(shape=(n_feat,)) ## input receives a neighbourhood minority batch and a proximal majority batch concatenated\n",
+    "    min_batch = tf.keras.layers.Lambda(lambda x: x[:neb])(batch_data) ## extract minority batch\n",
+    "    maj_batch = tf.keras.layers.Lambda(lambda x: x[neb:])(batch_data) ## extract majority batch \n",
+    "    conv_samples=generator(min_batch) ## pass minority batch into generator to obtain convex space transformation (synthetic samples) of the minority neighbourhood input batch\n",
+    "    new_samples=tf.concat([conv_samples,maj_batch],axis=0) ## concatenate the synthetic samples with the majority samples  \n",
+    "    output=discriminator(new_samples) ## pass the concatenated vector into the discriminator to know its decisions\n",
+    "    ## note that, the discriminator will not be trained but will make decisions based on its previous training while using this function\n",
+    "    model = Model(inputs=batch_data, outputs=output)\n",
+    "    opt = keras.optimizers.Adam(learning_rate=0.0001)\n",
+    "    model.compile(loss='mse', optimizer=opt)\n",
+    "    return model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "## this is the main training process where the GAN learns to generate appropriate samples from the convex space\n",
+    "## this is the first training phase for the discriminator and the only training phase for the generator.\n",
+    "\n",
+    "\n",
+    "def rough_learning(neb_epochs,data_min,data_maj,neb,gen,generator, discriminator,GAN):\n",
+    "\n",
+    "    \n",
+    "    step=1\n",
+    "    loss_history=[] ## this is for storing the loss for every run\n",
+    "    min_idx=0\n",
+    "    neb_epoch_count=1\n",
+    "    \n",
+    "    ## one-hot targets: first gen rows are synthetic-minority, next gen rows are majority\n",
+    "    labels=[]\n",
+    "    for i in range(2*gen):\n",
+    "        if i<gen:\n",
+    "            labels.append(np.array([1,0]))\n",
+    "        else:\n",
+    "            labels.append(np.array([0,1]))\n",
+    "    labels=np.array(labels)\n",
+    "    labels=tf.convert_to_tensor(labels)\n",
+    "    \n",
+    "    \n",
+    "    while step<(neb_epochs*len(data_min)):\n",
+    "\n",
+    "        \n",
+    "        min_batch=NMB_guided(data_min, neb, min_idx) ## generate minority neighbourhood batch for every minority class sample by index\n",
+    "        min_idx=min_idx+1 \n",
+    "        maj_batch=BMB(data_min,data_maj,neb,gen) ## generate random proximal majority batch \n",
+    "\n",
+    "        conv_samples=generator.predict(min_batch) ## generate synthetic samples from convex space of minority neighbourhood batch using generator\n",
+    "        concat_sample=tf.concat([conv_samples,maj_batch],axis=0) ## concatenate them with the majority batch\n",
+    "\n",
+    "        ## NOTE(review): in Keras, flipping .trainable generally takes effect only after a (re)compile —\n",
+    "        ## confirm this toggle actually freezes/unfreezes the discriminator as intended.\n",
+    "        discriminator.trainable=True ## switch on discriminator training\n",
+    "        discriminator.fit(x=concat_sample,y=labels,verbose=0) ## train the discriminator with the concatenated samples and the one-hot encoded labels \n",
+    "        discriminator.trainable=False ## switch off the discriminator training again\n",
+    "\n",
+    "        gan_loss_history=GAN.fit(concat_sample,y=labels,verbose=0) ## use the GAN to make the generator learn on the decisions made by the previous discriminator training\n",
+    "\n",
+    "        loss_history.append(gan_loss_history.history['loss']) ## store the loss for the step\n",
+    "\n",
+    "        if step%10 == 0:\n",
+    "            print(str(step)+' neighbourhood batches trained; running neighbourhood epoch ' + str(neb_epoch_count))\n",
+    "\n",
+    "        if min_idx==len(data_min): ## BUG FIX: was len(data_min)-1, which reset early and skipped the last minority sample every epoch\n",
+    "            print(str('Neighbourhood epoch '+ str(neb_epoch_count) +' complete'))\n",
+    "            neb_epoch_count=neb_epoch_count+1\n",
+    "            min_idx=0\n",
+    "\n",
+    "\n",
+    "        step=step+1\n",
+    "    run_range=range(1,len(loss_history)+1)\n",
+    "    plt.rcParams[\"figure.figsize\"] = (16,10)\n",
+    "    plt.xticks(fontsize=20)\n",
+    "    plt.yticks(fontsize=20)\n",
+    "    plt.xlabel('runs',fontsize=25)\n",
+    "    plt.ylabel('loss', fontsize=25)\n",
+    "    plt.title('Rough learning loss for discriminator', fontsize=25)\n",
+    "    plt.plot(run_range, loss_history)\n",
+    "    plt.show()\n",
+    "    return generator, discriminator, GAN, loss_history\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def rough_learning_predictions(discriminator,test_data_numpy,test_labels_numpy):\n",
+    "    \n",
+    "    ## evaluate the phase-one discriminator as a minority-vs-majority classifier;\n",
+    "    ## during phase one it already learned to separate convex minority points from majority points\n",
+    "    probs=discriminator.predict(tf.convert_to_tensor(test_data_numpy))\n",
+    "    ## threshold the first output column at 0.5 to obtain hard labels\n",
+    "    y_pred=np.digitize(probs[:,0], [.5])\n",
+    "    ## the resulting model tends to have good recall but lower precision\n",
+    "    c=confusion_matrix(test_labels_numpy, y_pred)\n",
+    "    f=f1_score(test_labels_numpy, y_pred)\n",
+    "    pr=precision_score(test_labels_numpy, y_pred)\n",
+    "    rc=recall_score(test_labels_numpy, y_pred)\n",
+    "    k=cohen_kappa_score(test_labels_numpy, y_pred)\n",
+    "    print('Rough learning confusion matrix:', c)\n",
+    "    print('Rough learning f1 score', f)\n",
+    "    print('Rough learning precision score', pr)\n",
+    "    print('Rough learning recall score', rc)\n",
+    "    print('Rough learning kappa score', k)\n",
+    "    return c,f,pr,rc,k"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def generate_data_for_min_point(data_min,neb,index,synth_num,generator):\n",
+    "    \n",
+    "    ## generate synth_num synthetic points for a particular minority sample \n",
+    "    ## synth_num -> required number of data points that can be generated from a neighbourhood\n",
+    "    ## data_min -> minority class data\n",
+    "    ## neb -> oversampling neighbourhood\n",
+    "    ## index -> index of the minority sample in a training data whose neighbourhood we want to obtain\n",
+    "    \n",
+    "    runs=int(synth_num/neb)+1 ## enough generator passes to cover synth_num (each pass yields neb samples)\n",
+    "    synth_set=[]\n",
+    "    for run in range(runs):\n",
+    "        batch=NMB_guided(data_min, neb, index)\n",
+    "        synth_batch=generator.predict(batch)\n",
+    "        synth_set.extend(synth_batch) ## idiomatic: extend replaces the original inner index loop\n",
+    "    synth_set=synth_set[:synth_num] ## trim the overshoot down to exactly synth_num samples\n",
+    "    synth_set=np.array(synth_set)\n",
+    "    return(synth_set)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def generate_synthetic_data(data_min,data_maj,neb,generator):\n",
+    "    \n",
+    "    ## roughly calculate the upper bound of the synthetic samples to be generated from each neighbourhood\n",
+    "    synth_num=((len(data_maj)-len(data_min))//len(data_min))+1\n",
+    "\n",
+    "    ## generate synth_num synthetic samples from each minority neighbourhood\n",
+    "    synth_set=[]\n",
+    "    for i in range(len(data_min)):\n",
+    "        synth_i=generate_data_for_min_point(data_min,neb,i,synth_num,generator)\n",
+    "        for k in range(len(synth_i)):\n",
+    "            synth_set.append(synth_i[k])\n",
+    "    synth_set=synth_set[:(len(data_maj)-len(data_min))] ## extract the exact number of synthetic samples needed to exactly balance the two classes\n",
+    "    synth_set=np.array(synth_set)\n",
+    "    ovs_min_class=np.concatenate((data_min,synth_set),axis=0)\n",
+    "    ovs_training_dataset=np.concatenate((ovs_min_class,data_maj),axis=0)\n",
+    "    ## three-way labels for the PCA plot: 0=real minority, 1=synthetic minority, 2=majority\n",
+    "    ovs_pca_labels=np.concatenate((np.zeros(len(data_min)),np.zeros(len(synth_set))+1,np.zeros(len(data_maj))+2))\n",
+    "    ovs_training_labels=np.concatenate((np.zeros(len(ovs_min_class))+1,np.zeros(len(data_maj))+0))\n",
+    "    ovs_training_labels_oh=[]\n",
+    "    for i in range(len(ovs_training_dataset)):\n",
+    "        if i<len(ovs_min_class):\n",
+    "            ovs_training_labels_oh.append(np.array([1,0]))\n",
+    "        else:\n",
+    "            ovs_training_labels_oh.append(np.array([0,1]))\n",
+    "    ovs_training_labels_oh=np.array(ovs_training_labels_oh)\n",
+    "    ovs_training_labels_oh=tf.convert_to_tensor(ovs_training_labels_oh)\n",
+    "    \n",
+    "    \n",
+    "    ## PCA visualization of the synthetic data\n",
+    "    ## observe how the minority samples from convex space have optimal variance and avoid overlap with the majority\n",
+    "    pca = PCA(n_components=2)\n",
+    "    pca.fit(ovs_training_dataset)\n",
+    "    data_pca= pca.transform(ovs_training_dataset)\n",
+    "    \n",
+    "    ## plot PCA\n",
+    "    plt.rcParams[\"figure.figsize\"] = (12,12)\n",
+    "\n",
+    "    colors=['r', 'b', 'g']\n",
+    "    plt.xticks(fontsize=20)\n",
+    "    plt.yticks(fontsize=20)\n",
+    "    plt.xlabel('PCA1',fontsize=25)\n",
+    "    plt.ylabel('PCA2', fontsize=25)\n",
+    "    plt.title('PCA plot of oversampled data',fontsize=25)\n",
+    "    classes = ['minority', 'synthetic minority', 'majority']\n",
+    "\n",
+    "    scatter=plt.scatter(data_pca[:,0], data_pca[:,1], c=ovs_pca_labels, cmap='Set1')\n",
+    "    plt.legend(handles=scatter.legend_elements()[0], labels=classes, fontsize=20)\n",
+    "    plt.show()\n",
+    "    \n",
+    "    return ovs_training_dataset, ovs_pca_labels, ovs_training_labels_oh"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "def final_learning(discriminator, ovs_training_dataset, ovs_training_labels_oh, test_data_numpy, test_labels_numpy, num_epochs):\n",
+    "    \n",
+    "    print('\\n')\n",
+    "    print('Final round training of the discriminator as a majority-minority classifier')\n",
+    "    print('\\n')\n",
+    "    ## second phase training of the discriminator with balanced data\n",
+    "    \n",
+    "    history_second_learning=discriminator.fit(x=ovs_training_dataset,y=ovs_training_labels_oh, batch_size=20, epochs=num_epochs)\n",
+    "    \n",
+    "    ## loss of the second phase learning smoothly decreases \n",
+    "    ## this is because now the data is fixed and diverse convex combinations are no longer fed into the discriminator at every training step\n",
+    "    run_range=range(1,num_epochs+1)\n",
+    "    plt.rcParams[\"figure.figsize\"] = (16,10)\n",
+    "    plt.xticks(fontsize=20)\n",
+    "    plt.yticks(fontsize=20)\n",
+    "    plt.xlabel('runs',fontsize=25)\n",
+    "    plt.ylabel('loss', fontsize=25)\n",
+    "    plt.title('Final learning loss for discriminator', fontsize=25)\n",
+    "    plt.plot(run_range, history_second_learning.history['loss'])\n",
+    "    plt.show()\n",
+    "    \n",
+    "    ## finally after second phase training the discriminator classifier has a more balanced performance\n",
+    "    ## meaning better F1-Score\n",
+    "    ## the recall decreases but the precision improves\n",
+    "    print('\\n')\n",
+    "\n",
+    "    y_pred_2d=discriminator.predict(tf.convert_to_tensor(test_data_numpy))\n",
+    "    y_pred=np.digitize(y_pred_2d[:,0], [.5])\n",
+    "    c=confusion_matrix(test_labels_numpy, y_pred)\n",
+    "    f=f1_score(test_labels_numpy, y_pred)\n",
+    "    pr=precision_score(test_labels_numpy, y_pred)\n",
+    "    rc=recall_score(test_labels_numpy, y_pred)\n",
+    "    k=cohen_kappa_score(test_labels_numpy, y_pred)\n",
+    "    print('Final learning confusion matrix:', c)\n",
+    "    print('Final learning f1 score', f)\n",
+    "    print('Final learning precision score', pr)\n",
+    "    print('Final learning recall score', rc)\n",
+    "    print('Final learning kappa score', k)\n",
+    "    return c,f,pr,rc,k"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def convGAN_train_end_to_end(training_data,training_labels,test_data,test_labels, neb, gen, neb_epochs,epochs_retrain_disc):\n",
+    "    \n",
+    "    ##minority class\n",
+    "    data_min=training_data[np.where(training_labels == 1)[0]]\n",
+    "    ##majority class\n",
+    "    data_maj=training_data[np.where(training_labels == 0)[0]]\n",
+    "\n",
+    "   \n",
+    "    ## instantiate generator network and visualize architecture\n",
+    "    conv_sample_generator=conv_sample_gen()\n",
+    "    print(conv_sample_generator.summary())\n",
+    "    print('\\n')\n",
+    "\n",
+    "    ## instantiate discriminator network and visualize architecture\n",
+    "    maj_min_discriminator=maj_min_disc()\n",
+    "    print(maj_min_discriminator.summary())\n",
+    "    print('\\n')\n",
+    "\n",
+    "    ## instantiate combined GAN and visualize architecture\n",
+    "    cg=convGAN(conv_sample_generator, maj_min_discriminator)\n",
+    "    print(cg.summary())\n",
+    "    print('\\n')\n",
+    "    \n",
+    "    print('Training the GAN, first round training of the discriminator as a majority-minority classifier')\n",
+    "    print('\\n')\n",
+    "\n",
+    "    ## train gan generator ## rough_train_discriminator\n",
+    "    conv_sample_generator, maj_min_discriminator_r ,cg , loss_history=rough_learning(neb_epochs, data_min,data_maj, neb, gen, conv_sample_generator, maj_min_discriminator, cg)\n",
+    "    print('\\n')\n",
+    "    \n",
+    "    ## rough learning results\n",
+    "    c_r,f_r,pr_r,rc_r,k_r=rough_learning_predictions(maj_min_discriminator_r, test_data,test_labels)\n",
+    "    print('\\n')\n",
+    "    \n",
+    "    ## generate synthetic data\n",
+    "    ovs_training_dataset, ovs_pca_labels, ovs_training_labels_oh=generate_synthetic_data(data_min, data_maj, neb, conv_sample_generator)\n",
+    "    print('\\n')\n",
+    "    \n",
+    "    ## final training results (maj_min_discriminator and maj_min_discriminator_r are the same model instance)\n",
+    "    c,f,pr,rc,k=final_learning(maj_min_discriminator, ovs_training_dataset, ovs_training_labels_oh, test_data, test_labels, epochs_retrain_disc)\n",
+    "    \n",
+    "    return ((c_r,f_r,pr_r,rc_r,k_r),(c,f,pr,rc,k))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "##specify parameters\n",
+    "\n",
+    "neb=gen=5 ##neb=gen required\n",
+    "neb_epochs=10\n",
+    "epochs_retrain_disc=50\n",
+    "n_feat=len(features_x[1]) ## number of features"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "np.random.seed(42)\n",
+    "strata=5\n",
+    "results=[]\n",
+    "for seed_perm in range(strata):\n",
+    "    \n",
+    "    features_x,labels_x=unison_shuffled_copies(features_x,labels_x,seed_perm)\n",
+    "\n",
+    "    #scaler = StandardScaler()\n",
+    "    #scaler.fit(features_x)\n",
+    "    #features_x=(scaler.transform(features_x))\n",
+    "    \n",
+    "    \n",
+    "    ### Extracting all features and labels\n",
+    "    print('Extracting all features and labels for seed:'+ str(seed_perm)+'\\n')\n",
+    "    \n",
+    "    ## Dividing data into training and testing datasets for 10-fold CV\n",
+    "    print('Dividing data into training and testing datasets for 10-fold CV for seed:'+ str(seed_perm)+'\\n')\n",
+    "    label_1=np.where(labels_x == 1)[0]\n",
+    "    label_1=list(label_1)\n",
+    "    \n",
+    "    features_1=features_x[label_1]\n",
+    "    \n",
+    "    label_0=np.where(labels_x != 1)[0]\n",
+    "    label_0=list(label_0)\n",
+    "    len(label_0)\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "    features_0=features_x[label_0]\n",
+    "    \n",
+    "    a=len(features_1)//5\n",
+    "    b=len(features_0)//5\n",
+    "\n",
+    "    fold_1_min=features_1[0:a]\n",
+    "    fold_1_maj=features_0[0:b]\n",
+    "    fold_1_tst=np.concatenate((fold_1_min,fold_1_maj))\n",
+    "    lab_1_tst=np.concatenate((np.zeros(len(fold_1_min))+1, np.zeros(len(fold_1_maj))))\n",
+    "\n",
+    "    fold_2_min=features_1[a:2*a]\n",
+    "    fold_2_maj=features_0[b:2*b]\n",
+    "    fold_2_tst=np.concatenate((fold_2_min,fold_2_maj))\n",
+    "    lab_2_tst=np.concatenate((np.zeros(len(fold_2_min))+1, np.zeros(len(fold_2_maj)))) ## BUG FIX: was built from fold_1 lengths (copy-paste)\n",
+    "\n",
+    "    fold_3_min=features_1[2*a:3*a]\n",
+    "    fold_3_maj=features_0[2*b:3*b]\n",
+    "    fold_3_tst=np.concatenate((fold_3_min,fold_3_maj))\n",
+    "    lab_3_tst=np.concatenate((np.zeros(len(fold_3_min))+1, np.zeros(len(fold_3_maj)))) ## BUG FIX: was built from fold_1 lengths (copy-paste)\n",
+    "\n",
+    "    fold_4_min=features_1[3*a:4*a]\n",
+    "    fold_4_maj=features_0[3*b:4*b]\n",
+    "    fold_4_tst=np.concatenate((fold_4_min,fold_4_maj))\n",
+    "    lab_4_tst=np.concatenate((np.zeros(len(fold_4_min))+1, np.zeros(len(fold_4_maj)))) ## BUG FIX: was built from fold_1 lengths (copy-paste)\n",
+    "\n",
+    "\n",
+    "    fold_5_min=features_1[4*a:]\n",
+    "    fold_5_maj=features_0[4*b:]\n",
+    "    fold_5_tst=np.concatenate((fold_5_min,fold_5_maj))\n",
+    "    lab_5_tst=np.concatenate((np.zeros(len(fold_5_min))+1, np.zeros(len(fold_5_maj))))\n",
+    "\n",
+    "    fold_1_trn=np.concatenate((fold_2_min,fold_3_min,fold_4_min,fold_5_min, fold_2_maj,fold_3_maj,fold_4_maj,fold_5_maj))\n",
+    "\n",
+    "    lab_1_trn=np.concatenate((np.zeros(3*a+len(fold_5_min))+1,np.zeros(3*b+len(fold_5_maj))))\n",
+    "\n",
+    "    fold_2_trn=np.concatenate((fold_1_min,fold_3_min,fold_4_min,fold_5_min,fold_1_maj,fold_3_maj,fold_4_maj,fold_5_maj))\n",
+    "\n",
+    "    lab_2_trn=np.concatenate((np.zeros(3*a+len(fold_5_min))+1,np.zeros(3*b+len(fold_5_maj))))\n",
+    "\n",
+    "    fold_3_trn=np.concatenate((fold_2_min,fold_1_min,fold_4_min,fold_5_min,fold_2_maj,fold_1_maj,fold_4_maj,fold_5_maj))\n",
+    "\n",
+    "    lab_3_trn=np.concatenate((np.zeros(3*a+len(fold_5_min))+1,np.zeros(3*b+len(fold_5_maj))))\n",
+    "\n",
+    "    fold_4_trn=np.concatenate((fold_2_min,fold_3_min,fold_1_min,fold_5_min,fold_2_maj,fold_3_maj,fold_1_maj,fold_5_maj))\n",
+    "\n",
+    "    lab_4_trn=np.concatenate((np.zeros(3*a+len(fold_5_min))+1,np.zeros(3*b+len(fold_5_maj))))\n",
+    "\n",
+    "    fold_5_trn=np.concatenate((fold_2_min,fold_3_min,fold_4_min,fold_1_min,fold_2_maj,fold_3_maj,fold_4_maj,fold_1_maj))\n",
+    "\n",
+    "    lab_5_trn=np.concatenate((np.zeros(4*a)+1,np.zeros(4*b)))\n",
+    "\n",
+    "\n",
+    "    training_folds_feats=[fold_1_trn,fold_2_trn,fold_3_trn,fold_4_trn,fold_5_trn]\n",
+    "\n",
+    "    testing_folds_feats=[fold_1_tst,fold_2_tst,fold_3_tst,fold_4_tst,fold_5_tst]\n",
+    "\n",
+    "    training_folds_labels=[lab_1_trn,lab_2_trn,lab_3_trn,lab_4_trn,lab_5_trn]\n",
+    "\n",
+    "    testing_folds_labels=[lab_1_tst,lab_2_tst,lab_3_tst,lab_4_tst,lab_5_tst]\n",
+    "    \n",
+    "    \n",
+    "    \n",
+    "    for i in range(5):\n",
+    "        \n",
+    "        print('\\n')\n",
+    "        print('Executing fold: '+str(i+1))\n",
+    "        print('\\n')\n",
+    "        \n",
+    "        r1,r2=convGAN_train_end_to_end(training_folds_feats[i],training_folds_labels[i],testing_folds_feats[i],testing_folds_labels[i], neb, gen, neb_epochs, epochs_retrain_disc)\n",
+    "        results.append(np.array([list(r1[1:]),list(r2[1:])]))\n",
+    "results=np.array(results)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "mean_rough=np.mean(results[:,0], axis=0)\n",
+    "data_r={'F1-Score_r':[mean_rough[0]], 'Precision_r' : [mean_rough[1]], 'Recall_r' : [mean_rough[2]], 'Kappa_r': [mean_rough[3]]}\n",
+    "df_r=pd.DataFrame(data=data_r)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('Rough training results:')\n",
+    "print('\\n')\n",
+    "print(df_r)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "mean_final=np.mean(results[:,1], axis=0)\n",
+    "data_f={'F1-Score_f':[mean_final[0]], 'Precision_f' : [mean_final[1]], 'Recall_f' : [mean_final[2]], 'Kappa_f': [mean_final[3]]}\n",
+    "df_f=pd.DataFrame(data=data_f)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('Final training results:')\n",
+    "print('\\n')\n",
+    "print(df_f)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We have to experiment on proper choice of parameters: neb_epoch and epochs_retrain_disc"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

BIN
initialExample/convGAN.zip


BIN
initialExample/convGAN1.zip


Những thay đổi đã bị hủy bỏ vì nó quá lớn
+ 979 - 0
initialExample/convGAN_abalone9-18.ipynb


Một số tệp đã không được hiển thị bởi vì quá nhiều tập tin thay đổi trong này khác