1. Processing Pipeline
Speech recognition
Natural language processing - semantic analysis
Logic analysis - combined with the business scenario and context
Natural language processing - generate natural-language text from the analysis results
Speech synthesis
A common natural language processing workflow:
First tokenize the training text (with stemming or lemmatization) and count word frequencies. Then use the term frequency-inverse document frequency (TF-IDF) algorithm to measure each word's contribution to the sample's semantics, and based on each word's contribution build a supervised classification model. Finally, feed test samples to the model to obtain their semantic categories.
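A minimal sketch of this workflow with scikit-learn (the two-sentence corpus and its labels here are hypothetical placeholders):
import sklearn.feature_extraction.text as ft
import sklearn.naive_bayes as nb
# Hypothetical toy corpus: label 0 = sports, label 1 = cryptography
corpus = ['the pitcher threw a curveball',
          'the cipher encrypts the message']
labels = [0, 1]
# Tokenize, count word frequencies, and weight by TF-IDF in one step
vectorizer = ft.TfidfVectorizer()
x = vectorizer.fit_transform(corpus)
# Supervised classifier built on each word's TF-IDF contribution
model = nb.MultinomialNB()
model.fit(x, labels)
# Hand a test sample to the model to get its semantic category
print(model.predict(vectorizer.transform(['a fastball and a curveball'])))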
Natural Language Toolkit - NLTK
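NLTK ships its corpora separately from the library itself; the examples below assume these one-time downloads (the resource names match the corpora actually used in this article):
import nltk
nltk.download('punkt')          # tokenizer models
nltk.download('wordnet')        # lemmatizer dictionary
nltk.download('names')          # male/female name lists
nltk.download('movie_reviews')  # sentiment analysis corpus
nltk.download('stopwords')      # stop word lists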
2. Text Analysis
# Text tokenization
"""
The following setup may be required:
import nltk
nltk.download('punkt')

import nltk.tokenize as tk
# Split a sample into sentences; sent_list: list of sentences
sent_list = tk.sent_tokenize(text)
# Split a sample into words; word_list: list of words
word_list = tk.word_tokenize(text)
# Split a sample into words; punctTokenizer: tokenizer object
punctTokenizer = tk.WordPunctTokenizer()
word_list = punctTokenizer.tokenize(text)
"""
import nltk.tokenize as tk
import nltk.stem.porter as pt
import nltk.stem.lancaster as lc
import nltk.stem.snowball as sb
text = "Are you curious about tokenization? " \
"Let's see how it works! " \
"We need to analyze a couple of sentences " \
"with punctuations to see it in action."
print(text)
sent_list = tk.sent_tokenize(text)
print(sent_list)
word_list = tk.word_tokenize(text)
print(word_list)
# Tokenizer object
punctTokenizer = tk.WordPunctTokenizer()
tokens = punctTokenizer.tokenize(text)
print(tokens)
# Stemming
"""
stemmer = pt.PorterStemmer()             # Porter stemmer, relatively lenient
stemmer = lc.LancasterStemmer()          # Lancaster stemmer, relatively aggressive
stemmer = sb.SnowballStemmer('english')  # Snowball stemmer, in between
"""
words = ['table', 'probably', 'wolves', 'playing',
         'is', 'dog', 'the', 'beaches', 'grounded',
         'dreamt', 'envision']
pt_stemmer = pt.PorterStemmer()
lc_stemmer = lc.LancasterStemmer()
sb_stemmer = sb.SnowballStemmer('english')
for word in words:
    pt_stem = pt_stemmer.stem(word)
    lc_stem = lc_stemmer.stem(word)
    sb_stem = sb_stemmer.stem(word)
    print('%8s %8s %8s %8s' %
          (word, pt_stem, lc_stem, sb_stem))
"""
table tabl tabl tabl
probably probabl probabl probabl
wolves wolv wolv wolv
playing play play play
is is is is
dog dog dog dog
the the the the
beaches beach beach beach
grounded ground ground ground
dreamt dreamt dreamt dreamt
envision envis envis envis
"""
# Lemmatization
import nltk.stem as ns
words = ['table', 'probably', 'wolves', 'playing',
         'is', 'dog', 'the', 'beaches', 'grounded',
         'dreamt', 'envision']
# Get a lemmatizer object (requires nltk.download('wordnet'))
lemmatizer = ns.WordNetLemmatizer()
for word in words:
    n_word = lemmatizer.lemmatize(word, pos='n')  # lemmatize as a noun
    v_word = lemmatizer.lemmatize(word, pos='v')  # lemmatize as a verb
    print('%8s %8s %8s' % (word, n_word, v_word))
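With the WordNet data installed, the output should look roughly like this (note how the lemmatizer, unlike the stemmers above, maps 'wolves' to the real word 'wolf' and the verb 'is' to 'be'):
"""
   table    table    table
probably probably probably
  wolves     wolf   wolves
 playing  playing     play
      is       is       be
     dog      dog      dog
     the      the      the
 beaches    beach    beach
grounded grounded   ground
  dreamt   dreamt    dream
envision envision envision
"""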
# Bag-of-words model: count how many times each word appears in each sentence
import sklearn.feature_extraction.text as ft
# Bag-of-words vectorizer
cv = ft.CountVectorizer()
# Fit the model: every word that appears becomes a feature name, each sentence
# is one sample, and a word's count within a sentence is the feature value
bow = cv.fit_transform(sent_list).toarray()
print(bow)
# Get all feature names; on scikit-learn < 1.0 this was cv.get_feature_names()
print(cv.get_feature_names_out())
# Term frequency: normalize the bag-of-words matrix (L1 norm per row)
import sklearn.preprocessing as sp
mat = sp.normalize(bow, norm='l1')
print(mat)
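Building on the same bag-of-words matrix, ft.TfidfTransformer produces the TF-IDF weights described in the workflow at the top of this article; a short continuation of the script above:
# TF-IDF weights from the bag-of-words matrix built above:
# words that occur in many sentences are down-weighted
tt = ft.TfidfTransformer()
tfidf = tt.fit_transform(bow)
print(tfidf.toarray())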
3. Text Classification
# Text classification
import sklearn.datasets as sd
import sklearn.feature_extraction.text as ft
import sklearn.naive_bayes as nb
train = sd.load_files('../data/ml_data/20news', encoding='latin1',
                      shuffle=True, random_state=7)
print(train)
train_data = train.data
train_target = train.target
categories = train.target_names
# Bag-of-words model
cv = ft.CountVectorizer()
train_bow = cv.fit_transform(train_data)
# TF-IDF transformer
tf_idf = ft.TfidfTransformer()
# TF-IDF feature matrix
tf_idf_mat = tf_idf.fit_transform(train_bow)
# Build the classifier
model = nb.MultinomialNB()
# Train the model
model.fit(tf_idf_mat, train_target)
# Test on new sentences
test_data = [
    'The curveballs of right handed pitchers tend to curve to the left',
    'Caesar cipher is an ancient form of encryption',
    'This two-wheeler is really good on slippery roads']
test_bow = cv.transform(test_data)
test_x = tf_idf.transform(test_bow)
pred_test_y = model.predict(test_x)
for sentence, index in zip(test_data, pred_test_y):
    print(sentence, '->', categories[index])
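The three stages above (counting, TF-IDF weighting, Naive Bayes) can also be chained with scikit-learn's Pipeline; a sketch reusing train_data, train_target, and test_data from this example:
import sklearn.pipeline as pl
pipe = pl.Pipeline([('bow', ft.CountVectorizer()),
                    ('tfidf', ft.TfidfTransformer()),
                    ('nb', nb.MultinomialNB())])
pipe.fit(train_data, train_target)
# Same predictions as the step-by-step version above
print([categories[i] for i in pipe.predict(test_data)])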
# Gender classification
import random
import nltk.corpus as nc
import nltk.classify as cf
# Read male.txt from the names corpus (a list of male first names)
male_names = nc.names.words('male.txt')
print(male_names)
female_names = nc.names.words('female.txt')
print(female_names)
data = []
for male_name in male_names:
    feature = {'feature': male_name[-2:].lower()}
    data.append((feature, 'male'))
for female_name in female_names:
    feature = {'feature': female_name[-2:].lower()}
    data.append((feature, 'female'))
random.seed(7)
random.shuffle(data)
train_data = data[:len(data)//2]
test_data = data[len(data)//2:]
model = cf.NaiveBayesClassifier.train(train_data)
ac = cf.accuracy(model, test_data)
print(ac) # 0.7781973816717019
# Predict genders for new names
names, genders = ['Leonardo', 'Amy', 'Sam', 'Tom', 'Katherine', 'Taylor', 'Susanne'], []
for name in names:
    feature = {'feature': name[-2:].lower()}
    gender = model.classify(feature)
    genders.append(gender)
for name, gender in zip(names, genders):
    print(name + '->' + gender)
"""
Leonardo->male
Amy->female
Sam->male
Tom->male
Katherine->female
Taylor->male
Susanne->female
"""
# Sentiment analysis
import nltk.corpus as nc
import nltk.classify as cf
import nltk.classify.util as cu
# Collect the words of every positive review into the pdata list
fileids = nc.movie_reviews.fileids('pos')
pdata = []
for fileid in fileids:
    sample = {}
    words = nc.movie_reviews.words(fileid)
    for word in words:
        sample[word] = True
    pdata.append((sample, 'POSITIVE'))
# Collect the words of every negative review into the ndata list
ndata = []
fileids = nc.movie_reviews.fileids('neg')
for fileid in fileids:
    sample = {}
    words = nc.movie_reviews.words(fileid)
    for word in words:
        sample[word] = True
    ndata.append((sample, 'NEGATIVE'))
# Split into training and test sets (80% for training)
pnumb, nnumb = int(0.8 * len(pdata)), int(0.8 * len(ndata))
train_data = pdata[:pnumb] + ndata[:nnumb]
test_data = pdata[pnumb:] + ndata[nnumb:]
# Train a Naive Bayes classifier on the training data
model = cf.NaiveBayesClassifier.train(train_data)
ac = cu.accuracy(model, test_data)
print(ac)  # 0.735
# Simulate a business scenario
reviews = [
    'It is an amazing movie.',
    'This is a dull movie. I would never recommend it to anyone.',
    'The cinematography is pretty great in this movie.',
    'The direction was terrible and the story was all over the place.']
for review in reviews:
    sample = {}
    words = review.split()
    for word in words:
        sample[word] = True
    pcls = model.classify(sample)
    print(review, '->', pcls)
"""
It is an amazing movie. -> POSITIVE
This is a dull movie. I would never recommend it to anyone. -> NEGATIVE
The cinematography is pretty great in this movie. -> POSITIVE
The direction was terrible and the story was all over the place. -> NEGATIVE
"""
# Topic extraction
import nltk.corpus as nc
import nltk.tokenize as tk
import nltk.stem.snowball as sb
import gensim.models.ldamodel as lda
import gensim.corpora as gc
doc = []
with open('../data/ml_data/topic.txt', 'r') as f:
    for line in f.readlines():
        doc.append(line[:-1])  # strip the trailing newline
print(doc)
# Word tokenizer: WordPunctTokenizer yields word and punctuation tokens,
# so the punctuation filter below can remove them (a sentence tokenizer
# such as PunktSentenceTokenizer would not produce word-level tokens)
tokenizer = tk.WordPunctTokenizer()
# Stop word list
stopwords = nc.stopwords.words('english')
# print(stopwords)
signs = [',', '.', '!']
# Stemmer
stemmer = sb.SnowballStemmer('english')
# print(stemmer)
lines_token = []
for line in doc:
    # Tokenize one line, then stem each surviving token
    tokens = tokenizer.tokenize(line.lower())
    print(tokens)
    line_tokens = []
    for token in tokens:
        if token not in stopwords and token not in signs:
            token = stemmer.stem(token)
            line_tokens.append(token)
    lines_token.append(line_tokens)
# print(lines_token)
# print(lines_token)
# Store every word in lines_token in a gensim dictionary,
# which assigns each word an integer id
dic = gc.Dictionary(lines_token)
# Build the bag-of-words list, one row per line
bow = []
for line in lines_token:
    # Build the bag of words for this line via the dictionary
    row = dic.doc2bow(line)
    bow.append(row)
n_topics = 2
# Build the LDA model
#   bow: bag-of-words corpus
#   num_topics: number of topics
#   id2word: dictionary
#   passes: number of training passes over the corpus
model = lda.LdaModel(bow, num_topics=n_topics, id2word=dic, passes=25)
# Print the 4 words that contribute most to each topic
topics = model.print_topics(num_topics=n_topics, num_words=4)
print(topics)
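To see which of the two topics each line of topic.txt leans toward, gensim's get_document_topics returns the per-document topic distribution:
# Topic distribution for each line's bag of words: (topic_id, probability)
for i, row in enumerate(bow):
    print(i, model.get_document_topics(row))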
4. Speech Recognition
# Speech recognition
# Read the signal
import numpy.fft as fft
import numpy as np
import scipy.io.wavfile as wf
import matplotlib.pyplot as mp
sample_rate, signals = wf.read('../data/ml_data/freq.wav')
print(sample_rate)
print(signals)
# Scale 16-bit integer samples to [-1, 1]
signals = signals / 2**15
times = np.arange(len(signals)) / sample_rate
print(times)
# Fourier transform
freqs = fft.fftfreq(signals.size, 1 / sample_rate)
ffts = fft.fft(signals)
pows = np.abs(ffts)
mp.subplot(121)
mp.title('time domain')
mp.plot(times, signals, label='signal')
mp.legend()
mp.subplot(122)
mp.title('freq domain')
mp.plot(freqs[freqs > 0], pows[freqs > 0], label='power')
mp.legend()
mp.tight_layout()
mp.show()
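The location of the peak in the power spectrum gives the dominant frequency of the signal, which is the quantity a frequency-based recognizer would key on:
# The dominant frequency is where the power spectrum peaks
dominant_freq = np.abs(freqs[pows.argmax()])
print('dominant frequency: %.1f Hz' % dominant_freq)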
# Sound synthesis
import json
# Note name -> frequency table
with open('../data/ml_data/12.json', 'r') as f:
    freqs = json.loads(f.read())
tones = [
    ('G5', 1.5),
    ('A5', 0.5),
    ('G5', 1.5),
    ('E5', 0.5),
    ('D5', 0.5),
    ('E5', 0.25),
    ('D5', 0.25),
    ('C5', 0.5),
    ('A4', 0.5),
    ('C5', 0.75)]
sample_rate = 44100
# Start with an empty array and append each synthesized tone
music = np.empty(shape=0)
for tone, duration in tones:
    times = np.linspace(0, duration, int(duration * sample_rate))
    sound = np.sin(2 * np.pi * freqs[tone] * times)
    music = np.append(music, sound)
# Scale back to 16-bit integer samples
music *= 2**15
music = music.astype(np.int16)
wf.write('music.wav', sample_rate, music)
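The 12.json file itself is not shown in this article; presumably it maps note names to fundamental frequencies in Hz, along the lines of the standard equal-temperament values for the notes used above:
"""
Hypothetical contents of 12.json (standard equal-temperament pitches):
{"A4": 440.00, "C5": 523.25, "D5": 587.33,
 "E5": 659.26, "G5": 783.99, "A5": 880.00}
"""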