# fdc.py
  1. # --[ Known to be used ]----
  2. import numpy as np
  3. from numba import jit
  4. import umap.umap_ as umap
  5. from fdc.tools import Timing
  6. # --[ Known to be used but can we avoid it? ]----
  7. import pandas as pd
  8. from fdc.visualize import plotMapping
  9. def value(v, defaultValue):
  10. if v is None:
  11. return defaultValue
  12. else:
  13. return v
  14. def feature_clustering(UMAP_neb, min_dist_UMAP, metric, data, visual=False):
  15. data_embedded = Clustering(metric, UMAP_neb, min_dist_UMAP).fit(data)
  16. result = pd.DataFrame(data=data_embedded, columns=['UMAP_0', 'UMAP_1'])
  17. if visual:
  18. plotMapping(result)
  19. return result
  20. @jit(nopython=True)
  21. def canberra_modified(a,b):
  22. return np.sqrt(np.sum(np.array(
  23. [np.abs(1.0 - x) / (1.0 + np.abs(x)) for x in (np.abs(a-b) + 1.0)]
  24. )))
  25. class Clustering:
  26. def __init__(self, metric='euclidian', UMAP_neb=30, min_dist_UMAP=0.1, max_components=2):
  27. self.metric = metric
  28. self.UMAP_neb = UMAP_neb
  29. self.min_dist_UMAP = min_dist_UMAP
  30. self.max_components = max_components
  31. def normalize(self, x):
  32. return (x - np.mean(x)) / np.std(x)
  33. def fit(self, data):
  34. np.random.seed(42)
  35. # ensure that the data is a 2d array.
  36. if len(data.shape) < 2:
  37. data = data.reshape((data.shape[0], 1))
  38. # do UMAP if needed (e.g. data has more than 2 features)
  39. data_embedded = umap.UMAP(
  40. n_neighbors=self.UMAP_neb
  41. , min_dist=self.min_dist_UMAP
  42. , n_components=self.max_components
  43. , metric=self.metric
  44. , random_state=42
  45. ).fit_transform(data)
  46. # normalize the data
  47. for n in range(data_embedded.shape[1]):
  48. data_embedded[:, n] = self.normalize(data_embedded[:, n])
  49. return data_embedded
  50. class FDC:
  51. def __init__(self,
  52. clustering_cont=None, clustering_ord=None, clustering_nom=None,
  53. visual=False,
  54. with_2d_embedding=False,
  55. use_pandas_output=False
  56. ):
  57. # used clusterings
  58. self.clustering_cont = value(clustering_cont, Clustering('euclidian', 30, 0.1))
  59. self.clustering_ord = value(clustering_ord, Clustering(canberra_modified, 30, 0.1))
  60. self.clustering_nom = value(clustering_nom, Clustering('hamming', 30, 0.1, max_components=1))
  61. # Control of data output
  62. self.use_pandas_output = use_pandas_output
  63. self.with_2d_embedding = with_2d_embedding
  64. # Control if a graph is shown
  65. self.visual = visual
  66. # Lists to select columns for continueous, nomial and ordinal data.
  67. self.cont_list = None
  68. self.nom_list = None
  69. self.ord_list = None
  70. def selectFeatures(self, continueous=None, nomial=None, ordinal=None):
  71. self.cont_list = continueous
  72. self.nom_list = nomial
  73. self.ord_list = ordinal
  74. def normalize(self, data,
  75. cont_list=None, nom_list=None, ord_list=None,
  76. with_2d_embedding=None,
  77. visual=None
  78. ):
  79. timing = Timing("FDC.normalize")
  80. # Take instance value if parameter was not given.
  81. visual = value(visual, self.visual)
  82. with_2d_embedding = value(with_2d_embedding, self.with_2d_embedding)
  83. # Initialize data.
  84. np.random.seed(42)
  85. concat_column_names = []
  86. concat_lists = []
  87. timing.step("init")
  88. # Reducing features into 2dim or 1dim
  89. actions = [
  90. ("CONT", self.clustering_cont, value(cont_list, self.cont_list))
  91. , ("ORD", self.clustering_ord, value(ord_list, self.ord_list))
  92. , ("NOM", self.clustering_nom, value(nom_list, self.nom_list))
  93. ]
  94. for (name, clustering, column_list) in actions:
  95. if column_list is not None:
  96. emb = clustering.fit(data[column_list])
  97. concat_lists.append(emb)
  98. for n in range(emb.shape[1]):
  99. concat_column_names.append(f"{name}_UMAP_{n}")
  100. timing.step(f"clustering {name}")
  101. # Merge results
  102. if concat_lists == []:
  103. raise ValueError("Expected at least one non empty column list.")
  104. result_concat = np.concatenate(concat_lists, axis=1)
  105. timing.step("concat")
  106. # Create 2d embedding from 5d embedding
  107. if with_2d_embedding or visual:
  108. result_reduced = umap.UMAP(
  109. n_neighbors=30
  110. , min_dist=0.001
  111. , n_components=2
  112. , metric='euclidean'
  113. , random_state=42
  114. ).fit_transform(result_concat)
  115. timing.step("umap 5 -> 2")
  116. if self.use_pandas_output:
  117. result_reduced = pd.DataFrame(
  118. data=result_reduced, columns=['UMAP_0', 'UMAP_1'])
  119. timing.step("array -> DataFrame")
  120. # Show mapping if needed
  121. if visual:
  122. if self.use_pandas_output:
  123. plotMapping(result_reduced)
  124. else:
  125. plotMapping(pd.DataFrame(
  126. data=result_reduced, columns=['UMAP_0', 'UMAP_1']))
  127. timing.step("plotting")
  128. # Transform to pandas DataFrame if needed.
  129. if self.use_pandas_output:
  130. result_concat = pd.DataFrame(
  131. data=result_concat, columns=concat_column_names)
  132. timing.step("array -> DataFrame")
  133. timing.step("total")
  134. if with_2d_embedding:
  135. #returns both 5D and 2D embeddings
  136. return result_concat, result_reduced
  137. else:
  138. #returns 5D embedding only
  139. return result_concat