Fast information gain computation
I need to compute information gain scores for more than 100k features across more than 10k documents for text classification. The code below works, but on the full dataset it is very slow: it takes more than an hour on my laptop. The dataset is 20newsgroup, and I am comparing against the chi2 function from scikit-learn, which runs extremely fast.
Is there any way to compute information gain faster for a dataset like this?
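For reference, the chi2 baseline is just a single call. A minimal sketch (fetch_20newsgroups_vectorized is used here only as a stand-in for my vectorized data):

from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.feature_selection import chi2

# stand-in for the vectorized 20newsgroup data
bunch = fetch_20newsgroups_vectorized(subset='train')
chi2_scores, p_values = chi2(bunch.data, bunch.target)  # finishes in seconds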
import numpy as np

def information_gain(x, y):
    def _entropy(values):
        # Shannon entropy of the class distribution in `values`
        counts = np.bincount(values)
        probs = counts[np.nonzero(counts)] / float(len(values))
        return - np.sum(probs * np.log(probs))

    def _information_gain(feature, y):
        # documents where the feature does / does not occur
        feature_set_indices = np.nonzero(feature)[1]
        feature_not_set_indices = [i for i in feature_range if i not in feature_set_indices]
        entropy_x_set = _entropy(y[feature_set_indices])
        entropy_x_not_set = _entropy(y[feature_not_set_indices])
        return entropy_before - (((len(feature_set_indices) / float(feature_size)) * entropy_x_set)
                                 + ((len(feature_not_set_indices) / float(feature_size)) * entropy_x_not_set))

    feature_size = x.shape[0]
    feature_range = range(0, feature_size)
    entropy_before = _entropy(y)
    information_gain_scores = []
    for feature in x.T:
        information_gain_scores.append(_information_gain(feature, y))
    return information_gain_scores, []
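For reference, the quantity this computes per feature f is the standard information gain, i.e. the mutual information between the binarized feature and the class label Y:

IG(f) = H(Y) - P(f>0) * H(Y | f>0) - P(f=0) * H(Y | f=0)

where H is the Shannon entropy of the class distribution (entropy_before in the code).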
EDIT:
I merged the inner functions and ran cProfile, as shown below (on a dataset limited to roughly 15k features and 1000 documents):
cProfile.runctx(
    """for feature in x.T:
    feature_set_indices = np.nonzero(feature)[1]
    feature_not_set_indices = [i for i in feature_range if i not in feature_set_indices]
    values = y[feature_set_indices]
    counts = np.bincount(values)
    probs = counts[np.nonzero(counts)] / float(len(values))
    entropy_x_set = - np.sum(probs * np.log(probs))
    values = y[feature_not_set_indices]
    counts = np.bincount(values)
    probs = counts[np.nonzero(counts)] / float(len(values))
    entropy_x_not_set = - np.sum(probs * np.log(probs))
    result = entropy_before - (((len(feature_set_indices) / float(feature_size)) * entropy_x_set)
        + ((len(feature_not_set_indices) / float(feature_size)) * entropy_x_not_set))
    information_gain_scores.append(result)""",
    globals(), locals())
Top 20 results sorted by tottime:
ncalls tottime percall cumtime percall filename:lineno(function)
1 60.27 60.27 65.48 65.48 <string>:1(<module>)
16171 1.362 0 2.801 0 csr.py:313(_get_row_slice)
16171 0.523 0 0.892 0 coo.py:201(_check)
16173 0.394 0 0.89 0 compressed.py:101(check_format)
210235 0.297 0 0.297 0 {numpy.core.multiarray.array}
16173 0.287 0 0.331 0 compressed.py:631(prune)
16171 0.197 0 1.529 0 compressed.py:534(tocoo)
16173 0.165 0 1.263 0 compressed.py:20(__init__)
16171 0.139 0 1.669 0 base.py:415(nonzero)
16171 0.124 0 1.201 0 coo.py:111(__init__)
32342 0.123 0 0.123 0 {method 'max' of 'numpy.ndarray' objects}
48513 0.117 0 0.218 0 sputils.py:93(isintlike)
32342 0.114 0 0.114 0 {method 'sum' of 'numpy.ndarray' objects}
16171 0.106 0 3.081 0 csr.py:186(__getitem__)
32342 0.105 0 0.105 0 {numpy.lib._compiled_base.bincount}
32344 0.09 0 0.094 0 base.py:59(set_shape)
210227 0.088 0 0.088 0 {isinstance}
48513 0.081 0 1.777 0 fromnumeric.py:1129(nonzero)
32342 0.078 0 0.078 0 {method 'min' of 'numpy.ndarray' objects}
97032 0.066 0 0.153 0 numeric.py:167(asarray)
It looks like most of the time is spent in _get_row_slice. I'm not entirely sure about the first row: it seems to cover the whole block I passed to cProfile.runctx, but I don't see why there is such a large gap between tottime=60.27 on the first row and tottime=1.362 on the second. Where was that time spent? Is there any way to inspect it in cProfile?
Basically, the problem seems to be the sparse matrix operations (slicing, getting individual elements). The solution is probably to compute the information gain with matrix algebra, the way chi2 is implemented in scikit-learn. But I don't know how to express this computation in terms of matrix operations... does anyone have an idea?
3 Answers
The line

feature_not_set_indices = [i for i in feature_range if i not in feature_set_indices]

takes 90% of the time. Try replacing it with set operations.
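A minimal sketch of that replacement, reusing the variable names from the question (np.setdiff1d is a vectorized alternative):

import numpy as np

# O(1) membership tests against a set instead of an O(nnz) scan per document
feature_set = set(feature_set_indices)
feature_not_set_indices = [i for i in feature_range if i not in feature_set]

# or fully vectorized: all document indices where the feature is absent
feature_not_set_indices = np.setdiff1d(np.arange(feature_size), feature_set_indices)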
Here is a version that uses matrix operations. The information gain (IG) of a feature is its score averaged over the classes.
import numpy as np
from scipy.sparse import issparse
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import check_array
from sklearn.utils.extmath import safe_sparse_dot

def ig(X, y):
    def get_t1(fc, c, f):
        t = np.log2(fc / (c * f))
        t[~np.isfinite(t)] = 0
        return np.multiply(fc, t)

    def get_t2(fc, c, f):
        t = np.log2((1 - f - c + fc) / ((1 - c) * (1 - f)))
        t[~np.isfinite(t)] = 0
        return np.multiply((1 - f - c + fc), t)

    def get_t3(c, f, class_count, observed, total):
        nfc = (class_count - observed) / total
        t = np.log2(nfc / (c * (1 - f)))
        t[~np.isfinite(t)] = 0
        return np.multiply(nfc, t)

    def get_t4(c, f, feature_count, observed, total):
        fnc = (feature_count - observed) / total
        t = np.log2(fnc / ((1 - c) * f))
        t[~np.isfinite(t)] = 0
        return np.multiply(fnc, t)

    X = check_array(X, accept_sparse='csr')
    if np.any((X.data if issparse(X) else X) < 0):
        raise ValueError("Input X must be non-negative.")

    Y = LabelBinarizer().fit_transform(y)
    if Y.shape[1] == 1:
        Y = np.append(1 - Y, Y, axis=1)

    # counts
    observed = safe_sparse_dot(Y.T, X)          # n_classes * n_features
    total = observed.sum(axis=0).reshape(1, -1).sum()
    feature_count = X.sum(axis=0).reshape(1, -1)
    class_count = (X.sum(axis=1).reshape(1, -1) * Y).T

    # probs
    f = feature_count / feature_count.sum()
    c = class_count / float(class_count.sum())
    fc = observed / total

    # the feature score is averaged over classes
    scores = (get_t1(fc, c, f) +
              get_t2(fc, c, f) +
              get_t3(c, f, class_count, observed, total) +
              get_t4(c, f, feature_count, observed, total)).mean(axis=0)

    scores = np.asarray(scores).reshape(-1)
    return scores, []
On a dataset with 1000 instances and 1000 unique features, this implementation is more than 100x faster than the version without matrix operations.
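A quick usage sketch (fetch_20newsgroups_vectorized is assumed here only to supply a sparse document-term matrix and labels):

import numpy as np
from sklearn.datasets import fetch_20newsgroups_vectorized

bunch = fetch_20newsgroups_vectorized(subset='train')
scores, _ = ig(bunch.data, bunch.target)
top10 = np.argsort(scores)[-10:]   # indices of the ten highest-scoring features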
Not sure whether this still helps, given that a year has passed. But it happens that I'm facing the same text classification task right now. I rewrote your code using the nonzero() function provided by sparse matrices: I just scan nz, count the corresponding y values, and compute the entropies.
The code below needs only a few seconds to run on the news20 dataset (loaded in libsvm sparse-matrix format).
import numpy as np

def information_gain(X, y):
    def _calIg():
        # conditional entropies for the current feature, from the running counts
        entropy_x_set = 0
        entropy_x_not_set = 0
        for c in classCnt:
            probs = classCnt[c] / float(featureTot)
            entropy_x_set = entropy_x_set - probs * np.log(probs)
            probs = (classTotCnt[c] - classCnt[c]) / float(tot - featureTot)
            entropy_x_not_set = entropy_x_not_set - probs * np.log(probs)
        for c in classTotCnt:
            if c not in classCnt:
                probs = classTotCnt[c] / float(tot - featureTot)
                entropy_x_not_set = entropy_x_not_set - probs * np.log(probs)
        return entropy_before - ((featureTot / float(tot)) * entropy_x_set
                                 + ((tot - featureTot) / float(tot)) * entropy_x_not_set)

    tot = X.shape[0]
    classTotCnt = {}
    entropy_before = 0
    # class counts and H(Y)
    for i in y:
        if i not in classTotCnt:
            classTotCnt[i] = 1
        else:
            classTotCnt[i] = classTotCnt[i] + 1
    for c in classTotCnt:
        probs = classTotCnt[c] / float(tot)
        entropy_before = entropy_before - probs * np.log(probs)

    # one pass over the nonzero entries, grouped by feature (rows of X.T)
    nz = X.T.nonzero()
    pre = 0
    classCnt = {}
    featureTot = 0
    information_gain = []
    for i in range(0, len(nz[0])):
        if (i != 0 and nz[0][i] != pre):
            for notappear in range(pre + 1, nz[0][i]):
                information_gain.append(0)   # features with no nonzero entries
            ig = _calIg()
            information_gain.append(ig)
            pre = nz[0][i]
            classCnt = {}
            featureTot = 0
        featureTot = featureTot + 1
        yclass = y[nz[1][i]]
        if yclass not in classCnt:
            classCnt[yclass] = 1
        else:
            classCnt[yclass] = classCnt[yclass] + 1
    ig = _calIg()
    information_gain.append(ig)

    return np.asarray(information_gain)
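A usage sketch under this answer's assumptions (load_svmlight_file reads libsvm format; the filename is a placeholder):

from sklearn.datasets import load_svmlight_file

# "news20.dat" is a hypothetical path to a libsvm-format file
X, y = load_svmlight_file("news20.dat")
scores = information_gain(X, y)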