Building on the Boolean retrieval and phrase retrieval covered in the previous article of this column, this article implements a k-gram index, uses the Jaccard coefficient for a first round of candidate filtering, merges in the candidates produced by a Soundex index, and finally applies a dynamic-programming edit-distance computation to pick the most similar word as the suggested correction.
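To make the k-gram index concrete before diving into the class, here is a minimal standalone sketch (the helper name kgrams and the example word are illustrative only, not part of the program below) of how a word is padded with '$' boundary markers and split into its 2-, 3- and 4-grams, which is exactly what build_index and search_simi do later:

def kgrams(word):
    # pad with '$' boundary markers, then collect all 2-, 3- and 4-grams
    padded = '$' + word + '$'
    grams = []
    for k in range(2, 5):
        for st in range(0, len(padded) - k + 1):
            grams.append(padded[st:st + k])
    return grams

print(kgrams('hello'))
# ['$h', 'he', 'el', 'll', 'lo', 'o$', '$he', 'hel', 'ell', 'llo', 'lo$', '$hel', 'hell', 'ello', 'llo$']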
Computing the edit distance
# Compute the edit distance
def distan(self, word1, word2):
    len1 = len(word1)
    len2 = len(word2)
    row1 = list(range(0, len1 + 1))  # initialize the DP matrix
    rect = [row1]
    for i in range(1, len2 + 1):
        row = np.zeros(len1 + 1)
        row[0] = i
        rect.append(row)
    rect = np.array(rect)
    for i in range(1, len2 + 1):  # fill in the matrix
        for j in range(1, len1 + 1):
            min1 = min(rect[i - 1][j], rect[i][j - 1])
            if word2[i - 1] == word1[j - 1]:
                min2 = min(min1 + 1, rect[i - 1][j - 1])
            else:
                min2 = min(min1 + 1, rect[i - 1][j - 1] + 1)
            rect[i][j] = min2
    # print(rect)
    return rect[len2][len1]
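The double loop fills the matrix with the standard Levenshtein recurrence, with rect[0][j] = j and rect[i][0] = i as base cases:

rect[i][j] = min(rect[i-1][j] + 1, rect[i][j-1] + 1, rect[i-1][j-1] + (0 if word2[i-1] == word1[j-1] else 1))

min1 and min2 in the code group the same three terms (deletion, insertion, match/substitution). For example, the distance between 'retreival' and 'retrieval' is 2, since plain Levenshtein distance counts the swapped 'ei' as two substitutions.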
# Jaccard filtering
def count_jeccard(self, similar_words, grams):
    similars = []
    for simi in similar_words:
        simi_word_gram = '$' + simi + '$'
        simi_grams = []
        for k in range(2, 5):
            for st in range(0, len(simi_word_gram) - k + 1):
                simi_gram = simi_word_gram[st:st + k]
                simi_grams.append(simi_gram)
        # Jaccard coefficient = len(intersection) / len(union)
        jeccard = len(list(set(simi_grams) & set(grams))) / len(list(set(simi_grams) | set(grams)))
        if jeccard > 0.1:
            similars.append(simi)
    # print(similars)  # words that survive the Jaccard filter
    return similars
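As a worked example of the 0.1 threshold, take the illustrative pair 'helo' (a misspelling) and 'hello' (a dictionary word): their '$'-padded 2-/3-/4-gram sets have 12 and 15 distinct members respectively and share 9 of them, so the coefficient is 9 / (12 + 15 - 9) = 0.5, comfortably above the cut-off. The low threshold only rules out candidates that have almost no grams in common with the query word; the final ranking is left to the edit-distance step.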
# Soundex
def count_soundex(self, word):
    to_ = ['aeiouhwy', 'bfpv', 'cgjkqsxz', 'dt', 'l', 'mn', 'r']
    sound_word = word[1:]
    for i in range(7):
        sound_word = re.sub(r"[{}]+".format(to_[i]), str(i), sound_word)  # map each letter class to its digit
    sound_word = sound_word.replace('0', '')  # drop the zeros
    # keep the first letter
    word = word[0] + sound_word
    if len(word) < 4:
        word = word + '000'  # pad with zeros up to four characters
    word = word[0:4]
    # print(word)
    return word
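Tracing count_soundex on an illustrative input reproduces the classic Soundex codes: for 'robert' (all indexed words are lower-cased by get_words), the tail 'obert' maps to '01063', dropping the zeros leaves '163', and prepending the first letter gives 'r163', i.e. the textbook code R163; a short name such as 'lee' reduces to just 'l' and is padded with zeros to 'l000'. The complete program is listed below.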
import re
import os
import nltk
import numpy as np
from collections import defaultdict

def get_words(text):
    text = text.lower()  # convert all characters to lower case
    words = nltk.word_tokenize(text)  # tokenize
    return words

def get_files(dir, file_type='.txt'):
    file_list = []
    for home, dirs, files in os.walk(dir):
        for filename in files:
            if file_type in filename:
                file_list.append(os.path.join(home, filename))
    return file_list

# Build a regular expression for each token type; () creates a group and ?P<NAME> names it
token_or = r'(?P<OR>\|\|)'
token_not = r'(?P<NOT>!)'
token_word = r'(?P<WORD>[a-zA-Z]+)'
token_and = r'(?P<AND>&&)'
token_lp = r'(?P<LP>\()'
token_rp = r'(?P<RP>\))'
token_dp = r'(?P<DP>")'
lexer = re.compile('|'.join([token_or, token_not, token_word, token_and, token_lp, token_rp, token_dp]))  # compile the regular expression
# Lexical analysis of the query using the compiled regular expression
def get_tokens(query):
    tokens = []  # each element of tokens is a (token, token type) pair
    for token in re.finditer(lexer, query):
        tokens.append((token.group(), token.lastgroup))
    return tokens

class BoolRetrieval:
    """Boolean retrieval class.
    index = {'hello': {1: [1, 3, 5], 3: [5, 6]}, 'world': {4: [2, 4]}}
    gram_index = [two-gram index, three-gram index, four-gram index]
    soundex = {soundex code: set of words with that code}
    """
    def __init__(self, index_path=''):
        if index_path == '':
            self.index = defaultdict(dict)
            self.gram_index = [defaultdict(set), defaultdict(set), defaultdict(set)]  # two-gram, three-gram, four-gram
            self.soundex = defaultdict(set)
        # an index file has already been built
        else:
            data = np.load(index_path, allow_pickle=True)
            self.files = data['files'][()]
            self.index = data['index'][()]
            self.gram_index = data['gram_index'][()]
            self.soundex = data['soundex'][()]
        self.query_tokens = []

    def build_index(self, text_dir):
        self.files = get_files(text_dir)  # collect all file names
        for num in range(0, len(self.files)):
            f = open(self.files[num])
            text = f.read()
            words = get_words(text)  # tokenize
            # print(words)
            # build the indexes, iterating over every word
            for pos_num in range(0, len(words)):
                word = words[pos_num]
                # self.index
                if num not in (self.index[word]).keys():
                    self.index[word][num] = []
                self.index[word][num].append(pos_num)
                # self.gram_index
                word_gram = '$' + word + '$'
                for k in range(2, 5):
                    for st in range(0, len(word_gram) - k + 1):
                        gram = word_gram[st:st + k]
                        self.gram_index[k - 2][gram].add(word)
                # self.soundex
                self.soundex[self.count_soundex(word)].add(word)
        # print(self.gram_index)
        # print(self.soundex)
        np.savez('index_3.npz', files=self.files, index=self.index, gram_index=self.gram_index, soundex=self.soundex)

    def search(self, query):
        self.query_tokens = get_tokens(query)  # tokens of the query, e.g. [('a', 'WORD'), ('||', 'OR'), ('b', 'WORD')]
        print(self.query_tokens)
        result = []
        # convert the file IDs returned by the query into file names
        for num in self.evaluate(0, len(self.query_tokens) - 1):
            result.append(self.files[num])
        return result

    # recursively evaluate the Boolean expression; p and q are the left and right boundaries of the sub-expression
    def evaluate(self, p, q):
        # parse error
        if p > q:
            return []
        # a single token must be a query word
        elif p == q:
            word = self.query_tokens[p][0]
            if word in self.index.keys():
                return self.index[word].keys()
            else:  # the query word is not in the dictionary
                simi_word = self.search_simi(word)
                print("Sorry, '%s' can't be found. Maybe you want to search for '%s'." % (word, simi_word))
                return self.index[simi_word].keys()
        # strip the outer parentheses
        elif self.check_parentheses(p, q):
            return self.evaluate(p + 1, q - 1)
        # a phrase enclosed in double quotes
        elif self.check_quotation(p, q):
            return self.phrase_search(p + 1, q - 1)
        else:
            op = self.find_operator(p, q)
            if op == -1:
                return []
            # files1 is the result of the left operand, files2 of the right operand
            if self.query_tokens[op][1] == 'NOT':
                files1 = []
            else:
                files1 = self.evaluate(p, op - 1)
            files2 = self.evaluate(op + 1, q)
            return self.merge(files1, files2, self.query_tokens[op][1])

    # check whether the expression has the form (expr)
    def check_parentheses(self, p, q):
        """Check whether the expression has the form (expr).
        The outer parentheses must enclose a balanced expression.
        Returns True or False.
        """
        if self.query_tokens[p][1] == 'LP' and self.query_tokens[q][1] == 'RP':
            rp = 0  # parenthesis counter
            for i in range(p + 1, q):
                if self.query_tokens[i][1] == 'LP':
                    rp += 1
                elif self.query_tokens[i][1] == 'RP':
                    rp -= 1
                if rp < 0:
                    return False
            if rp == 0:
                return True
        return False

    # a phrase wrapped in double quotes; it must not contain quotes or operators
    def check_quotation(self, p, q):
        if self.query_tokens[p][1] == 'DP' and self.query_tokens[q][1] == 'DP':
            for i in range(p + 1, q):
                if self.query_tokens[i][1] in ['DP', 'AND', 'OR', 'NOT']:  # compare token types, not raw operator strings
                    return False
            return True
        return False

    # phrase search: first find the files containing every word of the phrase,
    # then use the positional index to check that the words occur at consecutive positions
    def phrase_search(self, p, q):
        result = []
        # positional-index lookup
        files = self.evaluate(p, p)  # files that may contain the phrase
        for i in range(p + 1, q + 1):
            files = self.merge(files, self.evaluate(i, i), 'AND')
        for i in files:
            pos_list = self.index[self.query_tokens[p][0]][i]  # positions of word p in file i
            for j in range(p + 1, q + 1):
                pos_list = [pos + 1 for pos in pos_list]  # in a phrase, the next word must appear one position later
                true_pos_list = self.index[self.query_tokens[j][0]][i]  # actual positions
                pos_list = self.merge(pos_list, true_pos_list, 'AND')  # intersection
            if not pos_list == []:
                result.append(i)
        return result

    # find the dominant operator of the expression (the one with the lowest precedence)
    def find_operator(self, p, q):
        rp = 0  # parenthesis depth counter
        index = -1  # position of the operator; -1 if there is none
        i = p
        tem = 0  # rank of the best operator found so far
        op_dic = {'!': 1, '&&': 2, '||': 3}  # precedence ranks (a larger number means lower precedence)
        while i <= q:
            if self.query_tokens[i][1] == 'RP':
                rp += 1
            elif self.query_tokens[i][1] == 'LP':
                rp -= 1
            elif rp == 0 and self.query_tokens[i][1] in ['AND', 'OR', 'NOT']:
                if tem <= op_dic[self.query_tokens[i][0]]:
                    tem = op_dic[self.query_tokens[i][0]]
                    index = i
            i += 1
        # print(index)
        return index

    def merge(self, files1, files2, op_type):  # combine the two result sets according to the operator
        test = []
        if op_type == 'AND':
            test = list(set(files1) & set(files2))
        elif op_type == 'OR':
            test = list(set(files1) | set(files2))
        elif op_type == 'NOT':
            test = list(set(range(0, len(self.files))) - set(files2))
        return test

    # find the most similar word to an unknown query word
    def search_simi(self, word):
        similar_words = []
        grams = []
        word_gram = '$' + word + '$'
        for k in range(2, 5):  # collect every indexed word that shares at least one k-gram
            for st in range(0, len(word_gram) - k + 1):
                gram = word_gram[st:st + k]
                grams.append(gram)
                similar_words = list(set(similar_words) | self.gram_index[k - 2][gram])
        # print(similar_words)
        similars = self.count_jeccard(similar_words, grams)  # Jaccard filtering
        word_sound = self.count_soundex(word)  # add the words with the same Soundex code
        similars = list(set(similars) | self.soundex[word_sound])
        most_simi = 0  # compare edit distances to find the most similar word
        dist = self.distan(word, similars[0])
        for i in range(1, len(similars)):
            if self.distan(word, similars[i]) < dist:
                most_simi = i
                dist = self.distan(word, similars[i])
        return similars[most_simi]  # return the most similar word

    # Compute the edit distance
    def distan(self, word1, word2):
        len1 = len(word1)
        len2 = len(word2)
        row1 = list(range(0, len1 + 1))  # initialize the DP matrix
        rect = [row1]
        for i in range(1, len2 + 1):
            row = np.zeros(len1 + 1)
            row[0] = i
            rect.append(row)
        rect = np.array(rect)
        for i in range(1, len2 + 1):  # fill in the matrix
            for j in range(1, len1 + 1):
                min1 = min(rect[i - 1][j], rect[i][j - 1])
                if word2[i - 1] == word1[j - 1]:
                    min2 = min(min1 + 1, rect[i - 1][j - 1])
                else:
                    min2 = min(min1 + 1, rect[i - 1][j - 1] + 1)
                rect[i][j] = min2
        # print(rect)
        return rect[len2][len1]

    # Jaccard filtering
    def count_jeccard(self, similar_words, grams):
        similars = []
        for simi in similar_words:
            simi_word_gram = '$' + simi + '$'
            simi_grams = []
            for k in range(2, 5):
                for st in range(0, len(simi_word_gram) - k + 1):
                    simi_gram = simi_word_gram[st:st + k]
                    simi_grams.append(simi_gram)
            # Jaccard coefficient = len(intersection) / len(union)
            jeccard = len(list(set(simi_grams) & set(grams))) / len(list(set(simi_grams) | set(grams)))
            if jeccard > 0.1:
                similars.append(simi)
        # print(similars)  # words that survive the Jaccard filter
        return similars

    # Soundex
    def count_soundex(self, word):
        to_ = ['aeiouhwy', 'bfpv', 'cgjkqsxz', 'dt', 'l', 'mn', 'r']
        sound_word = word[1:]
        for i in range(7):
            sound_word = re.sub(r"[{}]+".format(to_[i]), str(i), sound_word)  # map each letter class to its digit
        sound_word = sound_word.replace('0', '')  # drop the zeros
        # keep the first letter
        word = word[0] + sound_word
        if len(word) < 4:
            word = word + '000'  # pad with zeros up to four characters
        word = word[0:4]
        # print(word)
        return word

# On the first run, build the index and generate the .npz file.
# br = BoolRetrieval()
# br.build_index(r'path to the documents to index')
# On later runs, simply load the index already built in that file.
br = BoolRetrieval('index_3.npz')
while True:
    query = input("Enter a query (AND: &&, OR: ||, NOT: !): ")
    print(br.search(query))
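For reference, the grammar accepted by the tokenizer consists of single words, the operators && (AND), || (OR) and ! (NOT), parentheses for grouping, and double-quoted phrases. A few illustrative queries (the actual hit lists of course depend on whatever documents were indexed):

information && retrieval        # files containing both words
information || retrieval        # files containing at least one of the two words
!retrieval                      # files that do not contain the word
(information || boolean) && retrieval
"information retrieval"         # files containing the exact phrase
retreival                       # unknown word: triggers the k-gram / Soundex / edit-distance suggestion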
If anything is unclear or has been overlooked, feel free to leave a comment.