@@ -11,6 +11,11 @@ jieba.load_userdict('cilin_words.txt')
 
 
 def segmentation(sentence_list):
+    """
+    Segment a list of sentences before calculating similarity
+    :param sentence_list: the sentences to segment
+    :return: a list of word lists, one per sentence
+    """
     result = list()
     for s in sentence_list:
         temp_seg = jieba.cut(s)
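The hunk cuts off inside the loop, so for context here is a minimal runnable sketch of the whole function, assuming the elided loop body simply materializes the `jieba.cut` generator into `result` (that completion is a guess, not part of the patch):

```python
import jieba

jieba.load_userdict('cilin_words.txt')  # custom dictionary, as loaded at the top of the module

def segmentation(sentence_list):
    """Segment a list of sentences before calculating similarity."""
    result = list()
    for s in sentence_list:
        temp_seg = jieba.cut(s)        # jieba.cut returns a generator of tokens
        result.append(list(temp_seg))  # assumption: the elided loop body collects it
    return result

# e.g. segmentation(['今天天气很好']) returns one word list per input sentence
```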
@@ -20,6 +25,12 @@ def segmentation(sentence_list):
 
 
 def get_similarity(s1, s2):
+    """
+    Calculate the similarity of two segmented sentences using the Cilin thesaurus
+    :param s1: a word list, the result of segmentation
+    :param s2: a word list, the result of segmentation
+    :return: the similarity score of the two sentences
+    """
     all_sim_1 = list()
     for w1 in s1:
         if is_contains_chinese(w1):
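The word-by-word comparison that follows the `if is_contains_chinese(w1):` guard is not shown. A common best-match-average aggregation is sketched below; `word_similarity` is a hypothetical stand-in for the Cilin-based word lookup this module presumably wraps, and the whole body is an assumption rather than the author's code:

```python
def get_similarity_sketch(s1, s2, word_similarity):
    """Hypothetical aggregation over Cilin word similarities.

    word_similarity(w1, w2) -> float is an assumed helper; the patch
    does not show how word-level Cilin scores are computed.
    """
    all_sim_1 = list()
    for w1 in s1:
        if is_contains_chinese(w1):
            # Score w1 against its best match in s2 (0.0 if none qualifies).
            best = max(
                (word_similarity(w1, w2) for w2 in s2 if is_contains_chinese(w2)),
                default=0.0,
            )
            all_sim_1.append(best)
    # Average the per-word best scores into a sentence-level similarity.
    return sum(all_sim_1) / len(all_sim_1) if all_sim_1 else 0.0
```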
@@ -42,6 +53,13 @@ def get_similarity(s1, s2):
 
 
 def most_similar_items(src_s, sentences, n=3):
+    """
+    Return the n sentences most similar to the target sentence
+    :param src_s: the target sentence
+    :param sentences: the candidate sentences to compare against
+    :param n: the number of results to return
+    :return: the n most similar sentences
+    """
     sentences = segmentation(sentences)
     temp = list()
     for item in sentences:
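Again the ranking step is truncated. One plausible completion, assuming `temp` accumulates `(score, sentence)` pairs that are then sorted to take the top `n` (the pairing of raw and segmented sentences here is my guess):

```python
def most_similar_items_sketch(src_s, sentences, n=3):
    """Hypothetical completion: rank candidates by get_similarity."""
    src_seg = segmentation([src_s])[0]  # segment the target once
    segmented = segmentation(sentences)
    temp = list()
    for raw, item in zip(sentences, segmented):
        temp.append((get_similarity(src_seg, item), raw))
    temp.sort(key=lambda pair: pair[0], reverse=True)  # highest score first
    return [raw for _score, raw in temp[:n]]
```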
@@ -55,6 +73,11 @@ def most_similar_items(src_s, sentences, n=3):
 
 
 def is_contains_chinese(s):
+    """
+    Tell whether the string contains any Chinese characters
+    :param s: the string to check
+    :return: True if s contains at least one Chinese character
+    """
     for _char in s:
         if '\u4e00' <= _char <= '\u9fa5':
             return True
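The hunk ends at `return True`; the function presumably finishes with `return False` (or falls through to an implicit `None`, which is also falsy). The range check covers the basic CJK Unified Ideographs block, U+4E00 through U+9FA5:

```python
def is_contains_chinese(s):
    """Tell whether the string contains any Chinese characters."""
    for _char in s:
        # U+4E00..U+9FA5 is the core CJK Unified Ideographs range.
        if '\u4e00' <= _char <= '\u9fa5':
            return True
    return False  # assumption: the diff is cut off before this line

assert is_contains_chinese('hello 世界')
assert not is_contains_chinese('hello world')
```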