I implemented Chinese spam-SMS classification with a Bernoulli naive Bayes model. For Chinese word segmentation I used jieba without configuring a stopword list, which lowers recall on spam messages, so I recommend adding stopwords (a sketch of stopword filtering follows below).
The dataset is the commonly used collection of 800k labeled Chinese SMS messages, available at https://github.com/hrwhisper/SpamMessage/blob/master/data/%E5%B8%A6%E6%A0%87%E7%AD%BE%E7%9F%AD%E4%BF%A1.txt
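Before the full program, here is a minimal sketch of loading the data and filtering stopwords during segmentation. The "label<TAB>message" line layout and the stopwords.txt file name are assumptions for illustration, not part of the original post.

# Hypothetical loader: assumes each line is "label<TAB>message" and that a
# one-word-per-line stopword file named stopwords.txt exists locally.
def load_and_segment(data_path, stop_path):
    stopwords = set()
    with codecs.open(stop_path, 'r', 'utf-8') as f:
        for line in f:
            stopwords.add(line.strip())
    labels, texts = [], []
    with codecs.open(data_path, 'r', 'utf-8') as f:
        for line in f:
            parts = line.strip().split('\t', 1)
            if len(parts) != 2:
                continue  # skip malformed lines
            label, text = parts
            # segment with jieba, dropping whitespace tokens and stopwords
            words = [w for w in jieba.cut(text) if w.strip() and w not in stopwords]
            labels.append(int(label))
            texts.append(words)
    return labels, texts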
The source code follows.
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import codecs
import numpy as np
import math
import jieba
# In[3]:
# 2. Compute the log prior probabilities
def getLogPrior(train):
    '''
    samples:  {'y': number of label-0 (normal) messages, 'n': number of label-1 (spam) messages}
    prior:    {'y': class prior, 'n': class prior}
    logPrior: {'y': log prior, 'n': log prior}
    '''
    a1 = train[train['1'] == 0]  # label 0 (normal)
    a2 = train[train['1'] == 1]  # label 1 (spam)
    samples = {'y': len(a1), 'n': len(a2)}
    docSum = samples['y'] + samples['n']
    prior = {'y': samples['y'] / docSum, 'n': samples['n'] / docSum}
    logPrior = {'y': math.log(prior['y']), 'n': math.log(prior['n'])}
    return (docSum, samples, logPrior, prior)
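As a quick sanity check, getLogPrior can be exercised on a toy DataFrame. The tiny data below is hypothetical, but it shows the schema the code expects: column '1' holds the 0/1 label and column '2' holds the segmented word list.

# Hypothetical toy data: column '1' = label, column '2' = segmented words
toy = pd.DataFrame({
    '1': [0, 0, 1],
    '2': [['今天', '开会'], ['周末', '聚餐'], ['中奖', '点击', '链接']],
})
docSum, samples, logPrior, prior = getLogPrior(toy)
print(docSum, samples)   # 3 {'y': 2, 'n': 1}
print(prior)             # {'y': 0.666..., 'n': 0.333...}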
# In[4]:
def getConditionPro(train):
    conditionPro = {'y': {}, 'n': {}}
    logConditionPro = {'y': {}, 'n': {}}
    docSum, samples, logPrior, prior = getLogPrior(train)
    a1 = train[train['1'] == 0]  # label 0 (normal)
    a2 = train[train['1'] == 1]  # label 1 (spam)
    classNum = 2
    wordSet = set()
    for X_word in train['2']:
        for word in X_word:
            wordSet.add(word)
    # 1. Count, per class, how many messages each word appears in
    #    (the post is truncated here; the rest is a standard Bernoulli
    #    completion: document-frequency counts plus Laplace smoothing)
    words = {'y': {}, 'n': {}}
    for message in a1['2']:
        for word in set(message):
            words['y'][word] = words['y'].get(word, 0) + 1
    for message in a2['2']:
        for word in set(message):
            words['n'][word] = words['n'].get(word, 0) + 1
    # 2. P(word | class) = (docs containing word + 1) / (docs in class + classNum)
    for word in wordSet:
        conditionPro['y'][word] = (words['y'].get(word, 0) + 1) / (samples['y'] + classNum)
        conditionPro['n'][word] = (words['n'].get(word, 0) + 1) / (samples['n'] + classNum)
        logConditionPro['y'][word] = math.log(conditionPro['y'][word])
        logConditionPro['n'][word] = math.log(conditionPro['n'][word])
    return (logConditionPro, logPrior, wordSet)
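The original post breaks off at this point. For completeness, here is a minimal sketch, not part of the original code, of how the quantities computed above would typically be combined at prediction time. The function name predict, the full-vocabulary Bernoulli scoring, and the 'y' = normal / 'n' = spam reading of the labels are all assumptions.

# Hypothetical prediction sketch. In a Bernoulli model every vocabulary word
# contributes to the score: log P(w|c) when the word is present in the
# message, log(1 - P(w|c)) when it is absent.
def predict(message_words, logConditionPro, logPrior, wordSet):
    present = set(message_words) & wordSet  # out-of-vocabulary words are ignored
    scores = {}
    for c in ('y', 'n'):
        score = logPrior[c]
        for word in wordSet:
            if word in present:
                score += logConditionPro[c][word]
            else:
                # recover P(word|c) from its log to score the word's absence
                score += math.log(1 - math.exp(logConditionPro[c][word]))
        scores[c] = score
    return max(scores, key=scores.get)  # 'y' = normal, 'n' = spam (assumed)

# Example usage:
# logConditionPro, logPrior, wordSet = getConditionPro(train)
# print(predict(jieba.lcut('恭喜您中奖,点击链接领取'), logConditionPro, logPrior, wordSet))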