所以我是一个真正的业余爱好者,试图在python中实现一种你可能称之为朴素贝叶斯算法的“简化”版本,似乎有很多麻烦,原因可能是我不太确定我不完全理解算法的工作方式……不过,我还是非常感谢大家的帮助/建议。这是我的代码:
class GaussianNB(object):
    """Gaussian Naive Bayes classifier over (class, [(feature, value), ...]) records.

    Fits one Gaussian (mean, std) per feature per class during train(),
    then classifies by maximizing the sum of per-feature log-likelihoods,
    assuming equal class priors.
    """

    def __init__(self):
        # Number of training examples seen per class.
        self.classmodels_count = {}
        # class -> {feature -> list of observed values}.
        self.classmodels = {}
        # class -> {feature -> (mean, std)}, filled in by train().
        self.classmodelsMeanAndVariance = {}
        self.featureTokenCount = 0
        self.featureTypeCount = 0

    @staticmethod
    def _gaussian_pdf(value, mean, std):
        """Gaussian probability density of `value` under N(mean, std**2).

        A zero std (constant feature in training) is clamped to a tiny
        epsilon so the density stays finite instead of dividing by zero.
        """
        std = max(std, 1e-9)
        coeff = 1.0 / (std * math.sqrt(2.0 * math.pi))
        return coeff * math.exp(-((value - mean) ** 2) / (2.0 * std * std))

    def train(self, trainingdata):
        """Fit the per-class, per-feature Gaussians.

        trainingdata: iterable of (class_label, features) pairs, where
        features is an iterable of (feature_name, numeric_value) pairs.
        """
        for current_class, features in trainingdata:
            # `dict.has_key` was removed in Python 3; membership uses `in`.
            if current_class in self.classmodels:
                current_class_model = self.classmodels[current_class]
                self.classmodels_count[current_class] += 1
            else:
                current_class_model = {}
                self.classmodels_count[current_class] = 1
            for feature, value in features:
                # setdefault collapses the original has_key/append branches.
                current_class_model.setdefault(feature, []).append(value)
            self.classmodels[current_class] = current_class_model
        for a_class, a_class_model in self.classmodels.items():
            # Mean/std computed directly with numpy instead of relying on
            # the undefined external helper `findMeanSD`.
            self.classmodelsMeanAndVariance[a_class] = {
                feature: (np.mean(values), np.std(values))
                for feature, values in a_class_model.items()
            }

    def classify(self, testing_vecs):
        """Return the most likely class label for each test vector.

        testing_vecs: iterable of (ignored, features) pairs shaped like the
        training records. Returns a list of predicted class labels.
        """
        outputs = []
        for vec in testing_vecs:
            features = vec[1]
            best_class = None
            best_log_prob = float('-inf')
            # Track the running maximum directly. The original keyed a dict
            # by probability and sorted the keys, which silently drops any
            # class whose score ties another's.
            for a_class, mean_and_var in self.classmodelsMeanAndVariance.items():
                log_prob = 0.0
                for feature, value in features:
                    # Simply ignore a feature if it was not seen in training.
                    if feature in mean_and_var:
                        mean, std = mean_and_var[feature]
                        log_prob += math.log10(self._gaussian_pdf(value, mean, std))
                # Ignoring P(class) prior: equal priors assumed.
                if log_prob > best_log_prob:
                    best_log_prob = log_prob
                    best_class = a_class
            outputs.append(best_class)
        return outputs
回溯(最近一次调用):
  File "C:\Users\Toshiba\workspace\Assignment6\src\gnb_test.py", line 34, in <module>
    gaussian = Model.train(testData)
  File "C:\Users\Toshiba\workspace\Assignment6\src\gnb.py", line 91, in train
    for f in features:
TypeError: 'numpy.float64' object is not iterable
我一点也不明白这是什么意思
发布于 2013-12-10 09:49:26
回溯表明,问题在于您试图遍历 features,但 features 是一个浮点数,而不是列表或元组——也就是说,它无法被分解成单独的元素。我之所以认为它是浮点数,是因为下面这几行代码:
for i in trainingdata:
current_class = i[0]
features = i[1]
在每次循环中都把 features 重新赋值为单个数值;而您似乎想要的是把这些数值收集到一个可迭代的容器中。试一试:
features = []
for i in trainingdata:
current_class = i[0]
features.append(i[1])
来源:https://stackoverflow.com/questions/20484374
复制相似问题