python多进程实现jieba分词

使用多进程可以显著提升 Python 处理 CPU 密集型任务的效率,这里以 jieba 分词为例,抽时间学习一下它的用法。

说明:
1、使用python multiprocessing模块下的Pool
具体用法参考官方文档:https://docs.python.org/zh-cn/3/library/multiprocessing.html
2、思路是将dataframe拆成小块喂入pool中,由于参数是多个,用到了functools下的partial,具体意义可以参考网上资料
3、数据集来自今日头条公开的新闻数据集(约38W条数据)

代码如下(文件路径隐去):

# -*- coding: utf-8 -*-
"""
@Time : 2021/10/2 15:09
@Auth : Eve
@File :multiprocesstest.py
@IDE :PyCharm
"""
from multiprocessing import Pool
from functools import partial
import pandas as pd
import numpy as np
import math
import re
import jieba
import os
import pprint
import time


def getdata(filepath):
    """Load the raw dataset as a single-column DataFrame, one record per row.

    The original used ``pd.read_csv(..., sep='\\n')``, which was deprecated in
    pandas 1.4 and removed in pandas 2.0.  Reading the lines directly keeps the
    exact result shape (one text column named ``0``, blank lines skipped, the
    same behavior ``read_csv`` had) while working on modern pandas.

    :param filepath: path of a UTF-8 text file, one record per line.
    :return: DataFrame with a single integer-named column ``0`` of line strings.
    """
    with open(filepath, encoding='utf-8') as f:
        lines = [line.rstrip('\n') for line in f]
    # read_csv skipped blank lines by default; preserve that.
    lines = [line for line in lines if line != '']
    return pd.DataFrame(lines)

# istest: debug flag — when it equals 1 only the first 10k rows are parsed,
# so the multiprocessing path can be tried on a small sample first.
def redata(filedata, istest):
    """Split each raw '_!_'-delimited record into id/num/category/content columns.

    :param filedata: single-column DataFrame of raw lines (column ``0``).
    :param istest: 1 to restrict parsing to the first 10000 rows, anything else
        to parse the full frame.
    :return: DataFrame with columns ['id', 'num', 'catgor', 'content'].
    """
    pattern = re.compile('(.*?)_!_(.*?)_!_(.*?)_!_(.*?)_!_.*?')
    subset = filedata.iloc[0:10000, :] if istest == 1 else filedata
    parsed = subset[0].str.extract(pattern)
    parsed.columns = ['id', 'num', 'catgor', 'content']
    return parsed

def getstopword_list(filepath):
    """Load stop words from a file, one word per line, skipping blank lines.

    Fixes two defects in the original: the file handle was never closed
    (no ``with``/``close``), and the encoding depended on the platform default
    (which breaks on Windows for a UTF-8 Chinese stop-word file) — now
    explicitly UTF-8.

    :param filepath: path of the stop-word file, one word per line.
    :return: list of stripped, non-empty stop words.
    """
    with open(filepath, encoding='utf-8') as f:
        return [word.strip() for word in f if word.strip() != '']

def getsymbol():
    """Return the punctuation tokens (full- and half-width) dropped during tokenizing.

    The list intentionally mixes ASCII and full-width CJK punctuation, since
    jieba emits both as standalone tokens.

    :return: list of punctuation strings to filter out of the cut words.
    """
    punctuation = [
        # half-width and full-width symbols, single and double spaces
        '~', '!', '#', '¥', '%', '……', '&', '*', '(', ')', ' ', '  ',
        ',', ':', '—', '+', '/', '*', '【', '】', '{', '}', '“', '”',
        '|', '《', '》', '?', '?', '[', ']', '『', '』', '"', '"', '!',
        '$', '=', '-', '(', ')',
    ]
    return punctuation

def usejieba(stopword_list, symbol, datasp):
    """Tokenize every row of ``datasp['content']`` with jieba, dropping noise tokens.

    Performance fix: the original tested each token against two *lists*
    (O(len(list)) per token, for every token of every row).  Merging them into
    one set once makes each membership test O(1); the filtered output is
    identical.

    :param stopword_list: iterable of stop words to drop.
    :param symbol: iterable of punctuation tokens to drop.
    :param datasp: DataFrame with a 'content' column (values coerced to str).
    :return: list of token lists, one per row, in row order.
    """
    drop = set(stopword_list) | set(symbol)
    cutWords_list = []
    for item in datasp['content'].astype(str):
        cutWords_list.append([k for k in jieba.cut(item) if k not in drop])
    return cutWords_list

# n: number of pieces to split into — one per worker process in the pool.
def cutdf(data, n):
    """Split a DataFrame into ``n`` consecutive row-wise slices.

    The first ``n - 1`` slices each hold ``floor(len(data) / n)`` rows; the
    final slice absorbs the remainder, so no row is lost.

    :param data: DataFrame to split.
    :param n: number of pieces (must be >= 1).
    :return: list of ``n`` DataFrame slices covering ``data`` in order.
    """
    size = math.floor(len(data) / n)
    pieces = [data.iloc[size * i: size * (i + 1), :] for i in range(n - 1)]
    # last piece runs to the end of the frame, picking up the remainder rows
    pieces.append(data.iloc[size * (n - 1):, :])
    return pieces

if __name__ == '__main__':
    t1 = time.time()
    datafilepath = '***.txt'
    stopfilepath = '***.txt'
    filedata = getdata(datafilepath)
    datasp = redata(filedata, 0)
    stopword_list = getstopword_list(stopfilepath)
    symbol = getsymbol()
    # Split into 4 chunks, one per pool task.
    lst = cutdf(datasp, 4)
    # Bind the two invariant arguments; pool.map supplies each chunk as the
    # trailing `datasp` argument of usejieba.
    pfunc = partial(usejieba, stopword_list, symbol)
    # Bug fix: the original never called pool.close()/join(), leaking worker
    # processes.  Pool is a context manager (terminates workers on exit).
    with Pool() as pool:  # no argument => one worker per CPU core
        chunk_results = pool.map(pfunc, lst)
    # Bug fix: pool.map returns one result *per chunk* (a list of 4 lists of
    # token lists); flatten so cutWords_list matches the single-process
    # usejieba() output — one token list per row.
    cutWords_list = [words for chunk in chunk_results for words in chunk]
    # pprint.pprint(cutWords_list)
    # cutWords_list = usejieba(stopword_list,symbol,datasp)
    t2 = time.time()
    print(t2 - t1)
    # Observed timings on the ~380k-row dataset:
    #   single process:  148.79890966415405 s
    #   multiprocessing:  65.23338413238525 s — a substantial speed-up


版权声明:本文为weixin_42377217原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。