# XConvGeN.py
  1. import numpy as np
  2. import matplotlib.pyplot as plt
  3. from library.interfaces import GanBaseClass
  4. from library.dataset import DataSet
  5. from library.timing import timing
  6. from keras.layers import Dense, Input, Multiply, Flatten, Conv1D, Reshape, InputLayer, Add
  7. from keras.models import Model, Sequential
  8. from keras import backend as K
  9. #from tqdm import tqdm
  10. import tensorflow as tf
  11. from tensorflow.keras.optimizers import Adam
  12. from tensorflow.keras.layers import Lambda
  13. import tensorflow_probability as tfp
  14. from sklearn.utils import shuffle
  15. from library.NNSearch import NNSearch, randomIndices
  16. import warnings
  17. warnings.filterwarnings("ignore")
  18. def repeat(x, times):
  19. return [x for _i in range(times)]
  20. def create01Labels(totalSize, sizeFirstHalf):
  21. labels = repeat(np.array([1,0]), sizeFirstHalf)
  22. labels.extend(repeat(np.array([0,1]), totalSize - sizeFirstHalf))
  23. return np.array(labels)
  24. class GeneratorConfig:
  25. def __init__(self, n_feat=None, neb=5, gen=None, neb_epochs=10, genLayerSizes=None, genAddNoise=True):
  26. self.n_feat = n_feat
  27. self.neb = neb
  28. self.gen = gen
  29. self.neb_epochs = neb_epochs
  30. self.genAddNoise = genAddNoise
  31. self.genLayerSizes = genLayerSizes
  32. def isConfigMissing(self):
  33. return any( x is None for x in
  34. [ self.n_feat
  35. , self.neb
  36. , self.gen
  37. , self.genAddNoise
  38. , self.genLayerSizes
  39. , self.neb_epochs
  40. ])
  41. def checkForValidConfig(self):
  42. if self.isConfigMissing():
  43. raise ValueError(f"Some configuration is missing.")
  44. if self.neb > self.gen:
  45. raise ValueError(f"Expected neb <= gen but got neb={self.neb} and gen={self.gen}.")
  46. if sum(self.genLayerSizes) != self.gen:
  47. raise ValueError(f"Expected the layer sizes to sum up to gen={self.gen}.")
  48. return True
  49. def fixMissingValuesByInputData(self, data):
  50. config = GeneratorConfig()
  51. config.neb = self.neb
  52. config.gen = self.gen
  53. config.genAddNoise = self.genAddNoise
  54. config.genLayerSizes = self.genLayerSizes
  55. if data is not None:
  56. if config.n_feat is None:
  57. config.n_feat = data.shape[1]
  58. if config.neb is None:
  59. config.neb = data.shape[0]
  60. else:
  61. config.neb = min(config.neb, data.shape[0])
  62. if config.gen is None:
  63. config.gen = config.neb
  64. if config.genLayerSizes is None:
  65. config.genLayerSizes = [config.gen]
  66. return config
  67. def nebShape(self, aboveSize=None):
  68. if aboveSize is None:
  69. return (self.neb, self.n_feat)
  70. else:
  71. return (aboveSize, self.neb, self.n_feat)
  72. def genShape(self, aboveSize=None):
  73. if aboveSize is None:
  74. return (self.gen, self.n_feat)
  75. else:
  76. return (aboveSize, self.gen, self.n_feat)
class XConvGeN(GanBaseClass):
    """
    This is the ConvGeN class. ConvGeN is a synthetic point generator for imbalanced datasets.

    Workflow: construct with a GeneratorConfig, call reset(data) to build the
    Keras networks, train(data) to fit them, then generateData(n) /
    generateDataPoint() to draw synthetic minority-class samples.
    """

    def __init__(self, config=None, fdc=None, debug=False):
        # *config*: GeneratorConfig; may still contain None values, which
        #   reset() fills in from the data.
        # *fdc*: optional feature-description helper exposing normalize(),
        #   nom_list and ord_list; used to normalize the input and to snap
        #   nominal/ordinal features of synthetic points to observed values.
        # *debug*: if True, print model summaries and plot the loss history.
        self.isTrained = False
        self.config = config
        self.defaultConfig = config  # kept pristine; reset() derives a completed copy
        self.loss_history = None
        self.debug = debug
        self.minSetSize = 0  # size of the minority class, set in train()
        self.conv_sample_generator = None  # generator network, built in reset()
        self.maj_min_discriminator = None  # discriminator network, built in reset()
        self.cg = None  # combined generator + (frozen) discriminator, built in reset()
        self.canPredict = True
        self.fdc = fdc
        self.lastProgress = -1
        # Named timers for coarse profiling of the individual phases.
        self.timing = { n: timing(n) for n in [
            "Train", "BMB", "NbhSearch", "NBH", "GenSamples", "Fit", "FixType"
        ] }
        if not self.config.isConfigMissing():
            self.config.checkForValidConfig()

    def reset(self, data):
        """
        Creates the network.
        *data* is the training data (array-like of shape (n, n_feat)) or None.
        It is used to determine the neighbourhood size if /neb/ in /__init__/ was None.
        """
        self.isTrained = False
        # Derive a fully populated config from the default config and the data shape.
        self.config = self.defaultConfig.fixMissingValuesByInputData(data)
        self.config.checkForValidConfig()
        ## instantiate generator network
        self.conv_sample_generator = self._conv_sample_gen()
        ## instantiate discriminator network
        self.maj_min_discriminator = self._maj_min_disc()
        ## instantiate the combined network (generator feeding the frozen discriminator)
        self.cg = self._convGeN(self.conv_sample_generator, self.maj_min_discriminator)
        self.lastProgress = (-1,-1,-1)
        if self.debug:
            print(f"neb={self.config.neb}, gen={self.config.gen}")
            print(self.conv_sample_generator.summary())
            print('\n')
            print(self.maj_min_discriminator.summary())
            print('\n')
            print(self.cg.summary())
            print('\n')

    def train(self, data, discTrainCount=5, batchSize=32):
        """
        Trains the Network.
        *data* contains the minority-class training points (array of shape (n, n_feat)).
        *discTrainCount* gives the number of extra training passes for the discriminator for each epoch. (>= 0)
        *batchSize* is the mini-batch size used for fitting.
        """
        if data.shape[0] <= 0:
            raise AttributeError("Train: Expected data class 1 to contain at least one point.")
        self.timing["Train"].start()
        # Store size of minority class. This is needed during point generation.
        self.minSetSize = data.shape[0]
        normalizedData = data
        if self.fdc is not None:
            normalizedData = self.fdc.normalize(data)
        self.timing["NbhSearch"].start()
        # Precalculate neighborhoods on the normalized data.
        self.nmbMin = NNSearch(self.config.neb).fit(haystack=normalizedData)
        # Keep the raw (unnormalized) points as float32 for index-based lookups.
        self.nmbMin.basePoints = np.array([ [x.astype(np.float32) for x in p] for p in data])
        self.timing["NbhSearch"].stop()
        # Do the training.
        self._rough_learning(data, discTrainCount, batchSize=batchSize)
        self.isTrained = True
        self.timing["Train"].stop()

    def generateDataPoint(self):
        """
        Returns one synthetic data point.
        """
        return (self.generateData(1))[0]

    def generateData(self, numOfSamples=1):
        """
        Generates a list of synthetic data-points.
        *numOfSamples* is an integer > 0. It gives the number of new generated samples.
        Raises ValueError when called before train().
        """
        if not self.isTrained:
            raise ValueError("Try to generate data with untrained network.")
        ## roughly calculate the upper bound of the synthetic samples to be generated from each neighbourhood
        synth_num = (numOfSamples // self.minSetSize) + 1
        runs = (synth_num // self.config.gen) + 1
        ## Get a random list of all indices
        indices = randomIndices(self.minSetSize)
        ## generate all neighborhoods lazily, one per minority point
        def neighborhoodGenerator():
            for index in indices:
                yield self.nmbMin.getNbhPointsOfItem(index)
        neighborhoods = (tf.data.Dataset
            .from_generator(neighborhoodGenerator, output_types=tf.float32)
            .repeat()
            )
        batch = neighborhoods.take(runs * self.minSetSize)
        synth_batch = self.conv_sample_generator.predict(batch.batch(32), verbose=0)
        # Pair each neighbourhood with the synthetic points generated from it
        # so nominal/ordinal features can be snapped back to observed values.
        pairs = tf.data.Dataset.zip(
            ( batch
            , tf.data.Dataset.from_tensor_slices(synth_batch)
            ))
        corrected = pairs.map(self.correct_feature_types())
        ## extract the exact number of synthetic samples needed to exactly balance the two classes
        r = np.concatenate(np.array(list(corrected.take(1 + (numOfSamples // self.config.gen)))), axis=0)[:numOfSamples]
        return r

    def predictReal(self, data):
        """
        Uses the discriminator on data.
        *data* is a numpy array of shape (n, n_feat) where n is the number of datapoints and n_feat the number of features.
        Returns the first component of each two-node prediction
        (NOTE(review): per the label layout in _rough_learning this appears to
        be the score of the synthetic/minority output node — confirm).
        """
        prediction = self.maj_min_discriminator.predict(data)
        return np.array([x[0] for x in prediction])

    # ###############################################################
    # Hidden internal functions
    # ###############################################################

    # Creating the Network: Generator
    def _conv_sample_gen(self):
        """
        The generator network to generate synthetic samples from the convex space
        of arbitrary minority neighbourhoods.
        Input: (neb, n_feat) neighbourhood batch; output: (gen, n_feat) synthetic samples.
        """
        n_feat = self.config.n_feat
        neb = self.config.neb
        gen = self.config.gen
        genLayerSizes = self.config.genLayerSizes
        ## takes minority batch as input
        min_neb_batch = Input(shape=(neb, n_feat))
        ## using 1-D convolution, feature dimension remains the same
        x = Conv1D(n_feat, 3, activation='relu', name="UnsharpenInput")(min_neb_batch)
        ## flatten after convolution
        x = Flatten(name="InputMatrixToVector")(x)
        synth = []
        n = 0
        # If the configured planes do not cover gen samples, append one more
        # plane; the min(...) below caps the total at exactly gen.
        # NOTE(review): this mutates the config's genLayerSizes list in place.
        if sum(genLayerSizes) < gen:
            genLayerSizes.append(gen)
        for layerSize in genLayerSizes:
            w = min(layerSize, gen - n)
            if w <= 0:
                break
            n += w
            ## add dense layer to transform the vector to a convenient dimension
            y = Dense(neb * w, activation='relu', name=f"P{n}_dense")(x)
            ## again, switching to 2-D tensor once we have the convenient shape
            y = Reshape((neb, w), name=f"P{n}_reshape")(y)
            ## column wise sum
            s = K.sum(y, axis=1)
            ## adding a small constant to always ensure the column sums are non zero.
            ## if this is not done then during initialization the sum can be zero.
            s_non_zero = Lambda(lambda x: x + .000001, name=f"P{n}_make_non_zero")(s)
            ## reciprocals of the approximated column sum
            sinv = tf.math.reciprocal(s_non_zero, name=f"P{n}_invert")
            ## At this step we ensure that column sum is 1 for every row in x.
            ## That means, each column is a set of convex coefficients
            y = Multiply(name=f"P{n}_normalize")([sinv, y])
            ## Now we transpose the matrix. So each row is now a set of convex coefficients
            ## NOTE(review): y[0] indexes the batch dimension of a symbolic
            ## tensor here — looks like only the first batch element's
            ## coefficients are used; confirm this is intended.
            aff = tf.transpose(y[0], name=f"P{n}_transpose")
            ## We now do matrix multiplication of the affine combinations with the original
            ## minority batch taken as input. This generates a convex transformation
            ## of the input minority batch
            y = tf.matmul(aff, min_neb_batch, name=f"P{n}_project")
            synth.append(y)
        synth = tf.concat(synth, axis=1, name="collect_planes")
        nOut = gen * n_feat
        if self.config.genAddNoise:
            # Learned noise head: a distribution layer sampled per forward pass.
            noiseGenerator = Sequential([
                InputLayer(input_shape=(gen, n_feat)),
                Flatten(),
                Dense(tfp.layers.IndependentNormal.params_size(nOut)),
                tfp.layers.IndependentNormal(nOut)
            ], name="RandomNoise")
            noise = noiseGenerator(synth)
            noise = Reshape((gen, n_feat), name="ReshapeNoise")(noise)
            synth = Add(name="AddNoise")([synth, noise])
        ## finally we compile the generator with an arbitrary minority neighbourhood batch
        ## as input and a convex space transformation of the same number of samples as output
        model = Model(inputs=min_neb_batch, outputs=synth)
        opt = Adam(learning_rate=0.001)
        model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)
        return model

    # Creating the Network: discriminator
    def _maj_min_disc(self):
        """
        The discriminator is trained in two phases:
        first phase: while training ConvGeN the discriminator learns to differentiate synthetic
                     minority samples generated from the convex minority data space against
                     the borderline majority samples
        second phase: after the ConvGeN generator learns to create synthetic samples,
                      it can be used to generate synthetic samples to balance the dataset
                      and then retrain the discriminator with the balanced dataset
        """
        ## takes as input synthetic samples stacked upon a batch of
        ## borderline majority samples
        samples = Input(shape=(self.config.n_feat,))
        ## passed through three dense layers
        y = Dense(250, activation='relu')(samples)
        y = Dense(125, activation='relu')(y)
        y = Dense(75, activation='relu')(y)
        ## two output nodes. outputs have to be one-hot coded (see labels variable before)
        output = Dense(2, activation='sigmoid')(y)
        ## compile model
        model = Model(inputs=samples, outputs=output)
        opt = Adam(learning_rate=0.0001)
        model.compile(loss='binary_crossentropy', optimizer=opt)
        return model

    # Creating the Network: ConvGeN
    def _convGeN(self, generator, discriminator):
        """
        For joining the generator and the discriminator.
        *generator* -> generator network instance
        *discriminator* -> discriminator network instance
        """
        n_feat = self.config.n_feat
        neb = self.config.neb
        gen = self.config.gen
        ## by default the discriminator trainability is switched off.
        ## Thus training ConvGeN means training the generator network as per previously
        ## trained discriminator network.
        discriminator.trainable = False
        # Shape of data: (batchSize, 2, gen, n_feat)
        # Shape of labels: (batchSize, 2 * gen, 2)
        ## input receives a neighbourhood minority batch
        ## and a proximal majority batch concatenated
        batch_data = Input(shape=(2, gen, n_feat))
        # batch_data: (batchSize, 2, gen, n_feat)
        ## extract minority batch
        min_batch = Lambda(lambda x: x[:, 0, : ,:], name="SplitForGen")(batch_data)
        # min_batch: (batchSize, gen, n_feat)
        ## extract majority batch
        maj_batch = Lambda(lambda x: x[:, 1, :, :], name="SplitForDisc")(batch_data)
        # maj_batch: (batchSize, gen, n_feat)
        maj_batch = tf.reshape(maj_batch, (-1, n_feat), name="ReshapeForDisc")
        # maj_batch: (batchSize * gen, n_feat)
        ## pass minority batch into generator to obtain convex space transformation
        ## (synthetic samples) of the minority neighbourhood input batch
        conv_samples = generator(min_batch)
        # conv_samples: (batchSize, gen, n_feat)
        conv_samples = tf.reshape(conv_samples, (-1, n_feat), name="ReshapeGenOutput")
        # conv_samples: (batchSize * gen, n_feat)
        ## pass samples into the discriminator to know its decisions
        conv_samples = discriminator(conv_samples)
        conv_samples = tf.reshape(conv_samples, (-1, gen, 2), name="ReshapeGenDiscOutput")
        # conv_samples: (batchSize, gen, 2)
        maj_batch = discriminator(maj_batch)
        maj_batch = tf.reshape(maj_batch, (-1, gen, 2), name="ReshapeMajDiscOutput")
        # maj_batch: (batchSize, gen, 2)
        ## concatenate the decisions
        output = tf.concat([conv_samples, maj_batch],axis=1)
        # output: (batchSize, 2 * gen, 2)
        ## note that the discriminator will not be trained but will make decisions based
        ## on its previous training while using this function
        model = Model(inputs=batch_data, outputs=output)
        opt = Adam(learning_rate=0.0001)
        model.compile(loss='mse', optimizer=opt)
        return model

    # Training
    def _rough_learning(self, data, discTrainCount, batchSize=32):
        """
        Alternating training loop: in every epoch the discriminator is trained
        (discTrainCount + 1 passes) on synthetic-vs-majority batches, then the
        combined network trains the generator against the frozen discriminator.
        """
        n_feat = self.config.n_feat
        neb = self.config.neb
        gen = self.config.gen
        generator = self.conv_sample_generator
        discriminator = self.maj_min_discriminator
        convGeN = self.cg
        loss_history = [] ## this is for storing the loss for every run
        minSetSize = len(data)
        ## Create labels for one neighborhood training:
        ## first gen rows [1,0] (synthetic side), then gen rows [0,1] (majority side).
        nLabels = 2 * gen
        labels = np.array(create01Labels(nLabels, gen))
        labelsGeN = np.array([labels])
        def getNeighborhoods():
            # Yields (min_batch, maj_batch) pairs for every minority index.
            for index in range(self.minSetSize):
                yield indexToBatches(index)
        def indexToBatches(min_idx):
            self.timing["NBH"].start()
            ## generate minority neighbourhood batch for every minority class sample by index
            min_batch_indices = self.nmbMin.neighbourhoodOfItem(min_idx)
            min_batch = self.nmbMin.getPointsFromIndices(min_batch_indices)
            ## generate random proximal majority batch
            maj_batch = self._BMB(min_batch_indices)
            self.timing["NBH"].stop()
            return (min_batch, maj_batch)
        def unbatch(parts):
            # Flattens a dataset of (batch, batch) pairs into single points.
            def fn():
                for part in parts:
                    for neighborhood in part:
                        for x in neighborhood:
                            yield x
            return fn
        # NOTE(review): genLabels is defined but never used below.
        def genLabels():
            for min_idx in range(minSetSize):
                for x in labels:
                    yield x
        # Zero padding used to widen a neb-sized neighbourhood to gen rows.
        padd = np.zeros((gen - neb, n_feat))
        discTrainCount = 1 + max(0, discTrainCount)
        for neb_epoch_count in range(self.config.neb_epochs):
            self.progressBar(neb_epoch_count / self.config.neb_epochs)
            ## Training of the discriminator.
            #
            # Get all neighborhoods and synthetic points as data stream.
            nbhPairs = tf.data.Dataset.from_generator(getNeighborhoods, output_types=tf.float32).repeat().take(discTrainCount * self.minSetSize)
            nbhMin = nbhPairs.map(lambda x: x[0])
            batchMaj = nbhPairs.map(lambda x: x[1])
            fnCt = self.correct_feature_types()
            synth_batch = self.conv_sample_generator.predict(nbhMin.batch(32), verbose=0)
            pairMinMaj = tf.data.Dataset.zip(
                ( nbhMin
                , tf.data.Dataset.from_tensor_slices(synth_batch)
                , batchMaj
                )).map(lambda x, y, z: [fnCt(x,y), z])
            a = tf.data.Dataset.from_generator(unbatch(pairMinMaj), output_types=tf.float32)
            # Get all labels as data stream.
            b = tf.data.Dataset.from_tensor_slices(labels).repeat()
            # Zip data and matching labels together for training.
            samples = tf.data.Dataset.zip((a, b)).batch(batchSize * 2 * gen)
            # train the discriminator with the concatenated samples and the one-hot encoded labels
            self.timing["Fit"].start()
            discriminator.trainable = True
            discriminator.fit(x=samples, verbose=0)
            discriminator.trainable = False
            self.timing["Fit"].stop()
            ## use the complete network to make the generator learn on the decisions
            ## made by the previous discriminator training
            #
            # Get all neighborhoods as data stream; pad each minority
            # neighbourhood from neb to gen rows with zeros.
            a = (tf.data.Dataset
                .from_generator(getNeighborhoods, output_types=tf.float32)
                .map(lambda x: [[tf.concat([x[0], padd], axis=0), x[1]]]))
            # Get all labels as data stream.
            b = tf.data.Dataset.from_tensor_slices(labelsGeN).repeat()
            # Zip data and matching labels together for training.
            samples = tf.data.Dataset.zip((a, b)).batch(batchSize)
            # Train with the data stream. Store the loss for later usage.
            gen_loss_history = convGeN.fit(samples, verbose=0, batch_size=batchSize)
            loss_history.append(gen_loss_history.history['loss'])
        self.progressBar(1.0)
        ## When done: print some statistics.
        if self.debug:
            run_range = range(1, len(loss_history) + 1)
            plt.rcParams["figure.figsize"] = (16,10)
            plt.xticks(fontsize=20)
            plt.yticks(fontsize=20)
            plt.xlabel('runs', fontsize=25)
            plt.ylabel('loss', fontsize=25)
            plt.title('Rough learning loss for discriminator', fontsize=25)
            plt.plot(run_range, loss_history)
            plt.show()
        self.loss_history = loss_history

    def _BMB(self, min_idxs):
        ## Generate a borderline majority batch.
        ## min_idxs -> indices of points to exclude (the current neighbourhood)
        ## Returns self.config.gen points sampled from self.nmbMin.basePoints.
        ## NOTE(review): basePoints is filled in train() with the minority
        ## data, so despite the name this draws from minority points outside
        ## the neighbourhood — confirm intended semantics.
        self.timing["BMB"].start()
        indices = randomIndices(self.minSetSize, outputSize=self.config.gen, indicesToIgnore=min_idxs)
        r = self.nmbMin.basePoints[indices]
        self.timing["BMB"].stop()
        return r

    def retrainDiscriminitor(self, data, labels):
        # Second-phase training: refit the discriminator on an (externally
        # balanced) dataset. *labels* is a 1-D array of 0/1 values which is
        # one-hot encoded as [x, 1-x]. (Method name typo kept: public API.)
        self.maj_min_discriminator.trainable = True
        labels = np.array([ [x, 1 - x] for x in labels])
        self.maj_min_discriminator.fit(x=data, y=labels, batch_size=20, epochs=self.config.neb_epochs)
        self.maj_min_discriminator.trainable = False

    def progressBar(self, x):
        # Render a simple textual progress bar for fraction x in [0, 1].
        # NOTE(review): self.lastProgress is compared but never updated here,
        # so the early-return dedup only fires if it already equals x.
        barWidth = 40
        x = int(x * barWidth)
        if self.lastProgress == x:
            return
        def bar(v):
            v = min(v, barWidth)
            r = ("=" * v) + (" " * (barWidth - v))
            return r
        print(f"[{bar(x)}]", end="\r")

    def correct_feature_types(self):
        """
        Return a tf.function (reference, synth) -> corrected synth that snaps
        every nominal/ordinal feature of each synthetic point to the nearest
        value observed in the reference neighbourhood. Continuous features are
        passed through unchanged. If no feature-description (fdc) or no
        nominal/ordinal columns exist, an identity function is returned.
        """
        # batch[0] = original points (gen x n_feat)
        # batch[1] = synthetic points (gen x n_feat)
        @tf.function
        def voidFunction(reference, synth):
            # Identity: nothing to correct.
            return synth
        if self.fdc is None:
            return voidFunction
        # Collect the union of nominal and ordinal column indices.
        columns = set(self.fdc.nom_list or [])
        for y in (self.fdc.ord_list or []):
            columns.add(y)
        columns = list(columns)
        if len(columns) == 0:
            return voidFunction
        neb = self.config.neb
        n_feat = self.config.n_feat
        # Mask per feature: 1.0 = snap to observed value, 0.0 = leave as-is.
        nn = tf.constant([(1.0 if x in columns else 0.0) for x in range(n_feat)])
        if n_feat is None:
            print("ERRROR n_feat is None")
        if nn is None:
            print("ERRROR nn is None")
        @tf.function
        def bestMatchOf(vi):
            # vi = (repeated feature value, mask row, reference column values).
            value = vi[0]
            c = vi[1][0]
            r = vi[2]
            if c != 0.0:
                # Snap to the closest observed value of this feature.
                d = tf.abs(value - r)
                return r[tf.math.argmin(d)]
            else:
                return value[0]
        @tf.function
        def indexted(v, rt):
            # Stack (value, mask, reference) triples per feature.
            vv = tf.reshape(tf.repeat([v], neb, axis=1), (n_feat, neb))
            vn = tf.reshape(tf.repeat([nn], neb, axis=1), (n_feat, neb))
            return tf.stack((vv, vn, rt), axis=1)
        @tf.function
        def correctVector(v, rt):
            # Correct one synthetic point feature by feature.
            return tf.map_fn(lambda x: bestMatchOf(x), indexted(v, rt))
        @tf.function
        def fn(reference, synth):
            rt = tf.transpose(reference)
            return tf.map_fn(lambda x: correctVector(x, rt), synth)
        return fn