When I try to call silhouette_samples I get a MemoryError. My use case is identical to this tutorial. I am using scikit-learn 0.18.1 with Python 3.5.
For the related function silhouette_score, this post suggests using the sample_size parameter, which downsamples before calling silhouette_samples. I'm not sure the downsampling would still produce reliable results, so I'm hesitant to do that.
My input X is a dataframe of 107545 rows x 12 columns, which I wouldn't really consider large, although I do only have 8 GB of RAM.
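For reference, a minimal sketch of that sample_size route (X and labels as in the question; the sample_size=10000 and random_state=42 values are illustrative, not from the post):

from sklearn.metrics import silhouette_score

# Draws sample_size points at random and computes the score on that
# subset only; random_state makes the draw reproducible.
score = silhouette_score(X, labels, metric='euclidean',
                         sample_size=10000, random_state=42)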
sklearn.metrics.silhouette_samples(X, labels, metric='euclidean')
---------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
<ipython-input-39-7285690e9ce8> in <module>()
----> 1 silhouette_samples(df_scaled, df['Cluster_Label'])
C:\Users\KE56166\AppData\Local\Enthought\Canopy\edm\envs\User\lib\site-packages\sklearn\metrics\cluster\unsupervised.py in silhouette_samples(X, labels, metric, **kwds)
167 check_number_of_labels(len(le.classes_), X.shape[0])
168
--> 169 distances = pairwise_distances(X, metric=metric, **kwds)
170 unique_labels = le.classes_
171 n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
C:\Users\KE56166\AppData\Local\Enthought\Canopy\edm\envs\User\lib\site-packages\sklearn\metrics\pairwise.py in pairwise_distances(X, Y, metric, n_jobs, **kwds)
1245 func = partial(distance.cdist, metric=metric, **kwds)
1246
-> 1247 return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
1248
1249
C:\Users\KE56166\AppData\Local\Enthought\Canopy\edm\envs\User\lib\site-packages\sklearn\metrics\pairwise.py in _parallel_pairwise(X, Y, func, n_jobs, **kwds)
1088 if n_jobs == 1:
1089 # Special case to avoid picklability checks in delayed
-> 1090 return func(X, Y, **kwds)
1091
1092 # TODO: in some cases, backend='threading' may be appropriate
C:\Users\KE56166\AppData\Local\Enthought\Canopy\edm\envs\User\lib\site-packages\sklearn\metrics\pairwise.py in euclidean_distances(X, Y, Y_norm_squared, squared, X_norm_squared)
244 YY = row_norms(Y, squared=True)[np.newaxis, :]
245
--> 246 distances = safe_sparse_dot(X, Y.T, dense_output=True)
247 distances *= -2
248 distances += XX
C:\Users\KE56166\AppData\Local\Enthought\Canopy\edm\envs\User\lib\site-packages\sklearn\utils\extmath.py in safe_sparse_dot(a, b, dense_output)
138 return ret
139 else:
--> 140 return np.dot(a, b)
141
142
MemoryError:
The computation seems to depend on pairwise_distances, which crashes at the np.dot call. I am not working with scarcity here, so perhaps there is no way around it. When computing distances I normally use numpy.linalg.norm(A). Does that have better memory handling?
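For context, this is the kind of row-at-a-time norm computation meant here (a sketch only; X is assumed to be a NumPy array):

import numpy as np

# Distances from row i to every row of X, one row at a time:
# each call needs O(n) memory rather than the full n x n matrix.
def distances_from_row(X, i):
    return np.linalg.norm(X - X[i], axis=1)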
Posted on 2017-12-08 00:42:39
Update: PR 11135 should fix this within scikit-learn, making the rest of this post obsolete.
You have roughly 100000 = 1e5 samples, which are points in 12-dimensional space. The pairwise_distances method tries to compute all pairwise distances between them, i.e. (1e5)**2 = 1e10 distances. Each one is a floating-point number; the float64 format takes 8 bytes of memory, so the distance matrix has size 8e10 bytes, which is 74.5 GB.
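The arithmetic can be checked in a couple of lines:

n = 1e5                        # number of samples
total_bytes = n ** 2 * 8       # one float64 (8 bytes) per pairwise distance
print(total_bytes / 2 ** 30)   # ~74.5 GiB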
This has occasionally been reported on GitHub: #4701, #4197, and the answer there is roughly: it is a NumPy problem, np.dot cannot cope with matrices of that size. Although one comment said
maybe it could be broken up into sub-matrices to do the computation in a more memory-efficient way.
Indeed, if instead of forming one giant distance matrix up front, the method computed the relevant blocks of it inside the loop over labels, it would require far less memory.
It is not hard to modify the method using its source code so that it masks first, rather than computing all distances first and then applying a binary mask. That is what I did below: instead of N**2 memory, where N is the number of samples, it requires n**2, where n is the size of the largest cluster.
If this looks practical, I imagine it could be added to scikit-learn behind some flag. Note, however, that this version does not support metric='precomputed'.
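For illustration, a hypothetical call of the function defined below on the asker's data (df_scaled and df['Cluster_Label'] are the names from the traceback in the question):

# Drop-in replacement for sklearn.metrics.silhouette_samples on dense data.
scores = silhouette_samples_memory_saving(df_scaled, df['Cluster_Label'])
print(scores.mean())   # comparable to silhouette_score on the full data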
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils import check_X_y
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics.cluster.unsupervised import check_number_of_labels

def silhouette_samples_memory_saving(X, labels, metric='euclidean', **kwds):
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    check_number_of_labels(len(le.classes_), X.shape[0])
    unique_labels = le.classes_
    n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))

    # For sample i, store the mean distance of the cluster to which
    # it belongs in intra_clust_dists[i]
    intra_clust_dists = np.zeros(X.shape[0], dtype=X.dtype)

    # For sample i, store the mean distance of the second closest
    # cluster in inter_clust_dists[i]
    inter_clust_dists = np.inf + intra_clust_dists

    for curr_label in range(len(unique_labels)):
        # Find inter_clust_dist for all samples belonging to the same
        # label.
        mask = labels == curr_label

        # Leave out current sample.
        n_samples_curr_lab = n_samples_per_label[curr_label] - 1
        if n_samples_curr_lab != 0:
            intra_distances = pairwise_distances(X[mask, :], metric=metric, **kwds)
            intra_clust_dists[mask] = np.sum(intra_distances, axis=1) / n_samples_curr_lab

        # Now iterate over all other labels, finding the mean
        # cluster distance that is closest to every sample.
        for other_label in range(len(unique_labels)):
            if other_label != curr_label:
                other_mask = labels == other_label
                inter_distances = pairwise_distances(X[mask, :], X[other_mask, :], metric=metric, **kwds)
                other_distances = np.mean(inter_distances, axis=1)
                inter_clust_dists[mask] = np.minimum(inter_clust_dists[mask], other_distances)

    sil_samples = inter_clust_dists - intra_clust_dists
    sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # score 0 for clusters of size 1, according to the paper
    sil_samples[n_samples_per_label.take(labels) == 1] = 0
    return sil_samples

Posted on 2018-02-16 13:07:11
I developed a memory-efficient and relatively fast solution for the euclidean distance case using numba. It uses roughly constant memory relative to the size of the input data and relies on numba's automatic parallelization. With it I was able to handle a 300000-row dataset in 24 dimensions (which would otherwise have required about 720 GB of memory). It can be modified as needed to implement other distance metrics.
import numpy as np
from sklearn.utils import check_X_y
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics.cluster.unsupervised import check_number_of_labels
from numba import jit

@jit(nogil=True, parallel=True)
def euclidean_distances_numba(X, Y=None, Y_norm_squared=None):
    # disable checks
    XX_ = (X * X).sum(axis=1)
    XX = XX_.reshape((1, -1))

    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        YY = XX.T
    elif Y_norm_squared is not None:
        YY = Y_norm_squared
    else:
        YY_ = np.sum(Y * Y, axis=1)
        YY = YY_.reshape((1, -1))

    distances = np.dot(X, Y.T)
    distances *= -2
    distances += XX
    distances += YY
    distances = np.maximum(distances, 0)
    return np.sqrt(distances)

@jit(parallel=True)
def euclidean_distances_sum(X, Y=None):
    if Y is None:
        Y = X
    Y_norm_squared = (Y ** 2).sum(axis=1)
    sums = np.zeros((len(X)))
    for i in range(len(X)):
        base_row = X[i, :]
        sums[i] = euclidean_distances_numba(base_row.reshape(1, -1), Y, Y_norm_squared=Y_norm_squared).sum()
    return sums

@jit(parallel=True)
def euclidean_distances_mean(X, Y=None):
    if Y is None:
        Y = X
    Y_norm_squared = (Y ** 2).sum(axis=1)
    means = np.zeros((len(X)))
    for i in range(len(X)):
        base_row = X[i, :]
        means[i] = euclidean_distances_numba(base_row.reshape(1, -1), Y, Y_norm_squared=Y_norm_squared).mean()
    return means

def silhouette_samples_memory_saving(X, labels, metric='euclidean', **kwds):
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    check_number_of_labels(len(le.classes_), X.shape[0])
    unique_labels = le.classes_
    n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))

    # For sample i, store the mean distance of the cluster to which
    # it belongs in intra_clust_dists[i]
    intra_clust_dists = np.zeros(X.shape[0], dtype=X.dtype)

    # For sample i, store the mean distance of the second closest
    # cluster in inter_clust_dists[i]
    inter_clust_dists = np.inf + intra_clust_dists

    for curr_label in range(len(unique_labels)):
        # Find inter_clust_dist for all samples belonging to the same label.
        mask = labels == curr_label

        # Leave out current sample.
        n_samples_curr_lab = n_samples_per_label[curr_label] - 1
        if n_samples_curr_lab != 0:
            intra_clust_dists[mask] = euclidean_distances_sum(X[mask, :]) / n_samples_curr_lab

        # Now iterate over all other labels, finding the mean
        # cluster distance that is closest to every sample.
        for other_label in range(len(unique_labels)):
            if other_label != curr_label:
                other_mask = labels == other_label
                other_distances = euclidean_distances_mean(X[mask, :], X[other_mask, :])
                inter_clust_dists[mask] = np.minimum(inter_clust_dists[mask], other_distances)

    sil_samples = inter_clust_dists - intra_clust_dists
    sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # score 0 for clusters of size 1, according to the paper
    sil_samples[n_samples_per_label.take(labels) == 1] = 0
    return sil_samples

Posted on 2017-12-11 06:08:02
The accepted answer is much better on memory than the official function, going from len(data)^2 down to len(largest cluster)^2. If your clusters are big enough, that can still cause problems. I wrote the following, which is ~len(data) in memory but painfully slow.
import numpy as np
from sklearn.utils import check_X_y
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics.cluster.unsupervised import check_number_of_labels

def silhouette_samples_newest(X, labels, metric='euclidean', **kwds):
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    unique_labels = le.classes_
    check_number_of_labels(len(unique_labels), X.shape[0])
    n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))

    # Mean distance from each point to all members of its own cluster
    # (the point itself is included in the mean).
    intra_clust_dists = np.array([
        np.linalg.norm(X[(labels == labels[i]), :] - point, axis=1).mean()
        for i, point in enumerate(X)])
    # Smallest mean distance from each point to the members of any
    # other cluster.
    inter_clust_dists = np.array([
        min([np.linalg.norm(X[(labels == label), :] - point, axis=1).mean()
             for label in unique_labels if label != labels[i]])
        for i, point in enumerate(X)])

    sil_samples = inter_clust_dists - intra_clust_dists
    sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # score 0 for clusters of size 1, according to the paper
    sil_samples[n_samples_per_label.take(labels) == 1] = 0
    return sil_samples

https://stackoverflow.com/questions/47702750