Kristian Schultz пре 3 године
родитељ
комит
4d77465ddd
2 измењена фајла са 595 додатих и 14 уклоњених линија
  1. 575 0
      AutoencoderTest.ipynb
  2. 20 14
      library/generators/autoencoder.py

Разлика за ову датотеку није приказана због њене велике величине
+ 575 - 0
AutoencoderTest.ipynb


+ 20 - 14
library/generators/autoencoder.py

@@ -28,13 +28,17 @@ import warnings
 warnings.filterwarnings("ignore")
 
 
-def newDense(size, activation="softsign"):
+lossFunction = "mean_squared_logarithmic_error"
+#lossFunction = "mse"
+
+
+def newDense(size, activation="relu"):  # softsign
     initializer = tf.keras.initializers.RandomUniform(minval=0.00001, maxval=float(size))
-    #initializer = "glorot_uniform"
+    initializer = "glorot_uniform"
 
     return Dense(int(size)
         , activation=activation
-        #, kernel_initializer=initializer
+        , kernel_initializer=initializer
         , bias_initializer=initializer
         )
 
@@ -56,7 +60,7 @@ class Autoencoder(GanBaseClass):
         self.autoencoder = None
         self.cg = None
         self.scaler = 1.0
-        self.lossFn = "mse"
+        self.lossFn = lossFunction #"mse"
         self.lossFn = "mean_squared_logarithmic_error"
 
     def reset(self):
@@ -88,16 +92,18 @@ class Autoencoder(GanBaseClass):
 
         d = dataSet.data1
         self.data1 = d
-        self.scaler = 1.1 * tf.reduce_max(tf.abs(d)).numpy()
+        self.scaler = 1.5 * tf.reduce_max(tf.abs(d)).numpy()
         scaleDown = 1.0 / self.scaler
 
         lastLoss = 0.0
         print(f"scaler: {self.scaler}")
 
-        for epoch in range(100):
-            h = self.autoencoder.fit(d, scaleDown * d, epochs=10, shuffle=True)
-            print(str(d[0]) + " →")
-            print(self.scaler * self.autoencoder.predict(np.array([d[0]])))
+        dScaled = scaleDown * d
+        
+        for epoch in range(1000):
+            h = self.autoencoder.fit(d, dScaled, epochs=1, shuffle=True)
+            #print(str(d[0]) + " →")
+            #print(self.scaler * self.autoencoder.predict(np.array([d[0]])))
             loss = h.history["loss"][-1]
             if loss < self.eps:
                 print(f"done in {epoch} rounds")
@@ -169,17 +175,17 @@ class Autoencoder(GanBaseClass):
         ## takes minority batch as input
         dataIn = Input(shape=(self.n_feat,))
         x = dataIn
+        x = newDense(self.n_feat)(x)
 
         ## 
         n = self.n_feat // 2
-        #x = newDense(max(n, self.middleSize))(x)
-        x = newDense(self.n_feat)(x)
+        x = newDense(max(n, self.middleSize))(x)
 
         x = newDense(self.middleSize)(x)
 
         model = Model(inputs=dataIn, outputs=x)
         opt = Adam(learning_rate=0.01)
-        model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)
+        model.compile(loss=lossFunction, optimizer=opt)
 
         print("encoder")
         model.summary()
@@ -197,14 +203,14 @@ class Autoencoder(GanBaseClass):
 
         ## 
         n = self.n_feat // 2
-        x = newDense(max(n, self.middleSize))(x)
+        #x = newDense(max(n, self.middleSize))(x)
 
         #x = newDense(self.n_feat)(x)
         x = newDense(self.n_feat)(x)
 
         model = Model(inputs=dataIn, outputs=x)
         opt = Adam(learning_rate=0.01)
-        model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)
+        model.compile(loss=lossFunction, optimizer=opt)
 
         print("decoder")
         model.summary()

Неке датотеке нису приказане због велике количине промена