最近我们发现了下面的GaussianNaiveBayes分类器代码
import numpy as np
class GaussianNaiveBayes:
def fit(self, X, y):
n_samples, n_features = X.shape
self._classes = np.unique(y)
n_classes = len(self._classes)
self._mean = np.zeros((n_classes, n_features), dtype=np.float64)
self._var = np.zeros((n_classes, n_features), dtype=np.float64)
self._priors = np.zeros(n_classes, dtype=np.float64)
# calculating the mean, variance and prior P(H) for each class
for i, c in enumerate(self._classes):
X_for_class_c = X[y==c]
self._mean[i, :] = X_for_class_c.mean(axis=0)
self._var[i, :] = X_for_class_c.var(axis=0)
self._priors[i] = X_for_class_c.shape[0] / float(n_samples)
#function for calculating the likelihood, P(E|H), of data X given the mean and variance
def _calculate_likelihood(self, class_idx, x):
mean = self._mean[class_idx]
var = self._var[class_idx]
num = np.exp(- (x-mean)**2 / (2 * var))
denom = np.sqrt(2 * np.pi * var)
return num / denom
#classifications by calculating the posterior probability, P(H|E), of the classes
def predict(self, X):
y_pred = [self._classify_sample(x) for x in X]
return np.array(y_pred)
def _classify_sample(self, x):
posteriors = []
# calculating posterior probability for each class
for i, c in enumerate(self._classes):
prior = np.log(self._priors[i])
posterior = np.sum(np.log(self._calculate_likelihood(i, x)))
posterior = prior + posterior
posteriors.append(posterior)
# return the class with highest posterior probability
return self._classes[np.argmax(posteriors)]
通过以下代码在Iris数据集上尝试上述代码,但收到错误“AttributeError:'GaussianNaiveBayes'对象没有属性'predict'”
iris = datasets.load_iris()
X = pd.DataFrame(iris.data, columns = iris.feature_names)
y = pd.DataFrame(iris.target, columns = ['Target'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2,random_state = 42)
nb = GaussianNaiveBayes()
nb.fit(X_train, y_train)
predictions = nb.predict(X_test)
请求任何指导以显示我的错误
您需要正确缩进代码,并且当
y
是数据帧时,这一行对X
数组进行子集设置将不起作用:同样,此函数也不适用于数据帧:
让我们适当地缩进它:
首先使用您的示例运行fit,您可以看到所有值的fit返回nan:
更改输入:
相关问题 更多 >
编程相关推荐