# fdc.py
# --[ Known to be used ]----
import numpy as np
from numba import jit
import umap.umap_ as umap
# --[ Known to be used but can we avoid it? ]----
import pandas as pd
from fdc.visualize import plotMapping
  8. def value(v, defaultValue):
  9. if v is None:
  10. return defaultValue
  11. else:
  12. return v
  13. def feature_clustering(UMAP_neb, min_dist_UMAP, metric, data, visual=False):
  14. data_embedded = Clustering(metric, UMAP_neb, min_dist_UMAP).fit(data)
  15. result = pd.DataFrame(data=data_embedded, columns=['UMAP_0', 'UMAP_1'])
  16. if visual:
  17. plotMapping(result)
  18. return result
  19. @jit(nopython=True)
  20. def canberra_modified(a,b):
  21. return np.sqrt(np.sum(np.array(
  22. [np.abs(1.0 - x) / (1.0 + np.abs(x)) for x in (np.abs(a-b) + 1.0)]
  23. )))
  24. class Clustering:
  25. def __init__(self, metric='euclidian', UMAP_neb=30, min_dist_UMAP=0.1, max_components=2):
  26. self.metric = metric
  27. self.UMAP_neb = UMAP_neb
  28. self.min_dist_UMAP = min_dist_UMAP
  29. self.max_components = max_components
  30. def normalize(self, x):
  31. return (x - np.mean(x)) / np.std(x)
  32. def fit(self, data):
  33. np.random.seed(42)
  34. # ensure that the data is a 2d array.
  35. if len(data.shape) < 2:
  36. data = data.reshape((data.shape[0], 1))
  37. # do UMAP if needed (e.g. data has more than 2 features)
  38. if data.shape[1] > self.max_components:
  39. data_embedded = umap.UMAP(
  40. n_neighbors=self.UMAP_neb
  41. , min_dist=self.min_dist_UMAP
  42. , n_components=self.max_components
  43. , metric=self.metric
  44. , random_state=42
  45. ).fit_transform(data)
  46. else:
  47. data_embedded = data
  48. # normalize the data
  49. for n in range(data_embedded.shape[1]):
  50. data_embedded[:, n] = self.normalize(data_embedded[:, n])
  51. return data_embedded
  52. class FDC:
  53. def __init__(self,
  54. clustering_cont=None, clustering_ord=None, clustering_nom=None,
  55. visual=False,
  56. with_2d_embedding=False,
  57. use_pandas_output=False
  58. ):
  59. # used clusterings
  60. self.clustering_cont = value(clustering_cont, Clustering('euclidian', 30, 0.1))
  61. self.clustering_ord = value(clustering_ord, Clustering(canberra_modified, 30, 0.1))
  62. self.clustering_nom = value(clustering_nom, Clustering('hamming', 30, 0.1, max_components=1))
  63. # Control of data output
  64. self.use_pandas_output = use_pandas_output
  65. self.with_2d_embedding = with_2d_embedding
  66. # Control if a graph is shown
  67. self.visual = visual
  68. # Lists to select columns for continueous, nomial and ordinal data.
  69. self.cont_list = None
  70. self.nom_list = None
  71. self.ord_list = None
  72. def selectFeatures(self, continueous=None, nomial=None, ordinal=None):
  73. self.cont_list = continueous
  74. self.nom_list = nomial
  75. self.ord_list = ordinal
  76. def normalize(self, data,
  77. cont_list=None, nom_list=None, ord_list=None,
  78. with_2d_embedding=None,
  79. visual=None
  80. ):
  81. # Take instance value if parameter was not given.
  82. visual = value(visual, self.visual)
  83. with_2d_embedding = value(with_2d_embedding, self.with_2d_embedding)
  84. # Initialize data.
  85. np.random.seed(42)
  86. concat_column_names = []
  87. concat_lists = []
  88. # Reducing features into 2dim or 1dim
  89. actions = [
  90. ("CONT", self.clustering_cont, value(cont_list, self.cont_list))
  91. , ("ORD", self.clustering_ord, value(ord_list, self.ord_list))
  92. , ("CONT", self.clustering_nom, value(nom_list, self.nom_list))
  93. ]
  94. for (name, clustering, column_list) in actions:
  95. if column_list is not None:
  96. emb = clustering.fit(data[column_list])
  97. concat_lists.append(emb)
  98. for n in range(emb.shape[1]):
  99. concat_column_names.append(f"{name}_UMAP_{n}")
  100. # Merge results
  101. if concat_lists == []:
  102. raise ValueError("Expected at least one non empty column list.")
  103. result_concat = np.concatenate(concat_lists, axis=1)
  104. # Create 2d embedding from 5d embedding
  105. if with_2d_embedding or visual:
  106. result_reduced = umap.UMAP(
  107. n_neighbors=30
  108. , min_dist=0.001
  109. , n_components=2
  110. , metric='euclidean'
  111. , random_state=42
  112. ).fit_transform(result_concat)
  113. if self.use_pandas_output:
  114. result_reduced = pd.DataFrame(
  115. data=result_reduced, columns=['UMAP_0', 'UMAP_1'])
  116. # Show mapping if needed
  117. if visual:
  118. if self.use_pandas_output:
  119. plotMapping(result_reduced)
  120. else:
  121. plotMapping(pd.DataFrame(
  122. data=result_reduced, columns=['UMAP_0', 'UMAP_1']))
  123. # Transform to pandas DataFrame if needed.
  124. if self.use_pandas_output:
  125. result_concat = pd.DataFrame(
  126. data=result_concat, columns=concat_column_names)
  127. if with_2d_embedding:
  128. #returns both 5D and 2D embeddings
  129. return result_concat, result_reduced
  130. else:
  131. #returns 5D embedding only
  132. return result_concat