change nan to zero

chenhaiyang, 5 years ago
Parent commit: 3cf5a3d9ea
3 changed files with 14 additions and 37 deletions
  1. sim/text_sim.py (+6 -12)
  2. sim/words_sim.py (+7 -22)
  3. tests/test_text_sim.py (+1 -3)

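Why the fix works: np.mean of an empty list evaluates to NaN (numpy warns about the mean of an empty slice), so get_similarity returned NaN whenever one of the per-word similarity lists stayed empty, e.g. when a sentence yields no comparable words. With the [0] fallback the score degrades to 0 instead. A minimal standalone sketch of the before/after behaviour:

import numpy as np

all_sim_1 = []            # no word pair produced a similarity score
all_sim_2 = [0.5, 0.7]

# Before the fix: the mean of an empty list is NaN, which poisons the final score
print((np.mean(all_sim_1) + np.mean(all_sim_2)) / 2)   # nan

# After the fix: empty lists fall back to [0], so the result stays a real number
if not all_sim_1:
    all_sim_1 = [0]
if not all_sim_2:
    all_sim_2 = [0]
print((np.mean(all_sim_1) + np.mean(all_sim_2)) / 2)   # 0.3
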
sim/text_sim.py (+6 -12)

@@ -6,6 +6,7 @@ from operator import itemgetter
 
 import jieba
 import numpy as np
+
 from .words_sim import SimCilin
 
 data = pkgutil.get_data(__package__, 'cilin_words.txt')
@@ -53,6 +54,10 @@ def get_similarity(s1, s2):
             sim_list.sort()
             all_sim_2.append(sim_list[-1])
 
+    if not all_sim_1:
+        all_sim_1 = [0]
+    if not all_sim_2:
+        all_sim_2 = [0]
     return (np.mean(all_sim_1) + np.mean(all_sim_2)) / 2
 
 
@@ -64,6 +69,7 @@ def most_similar_items(src_s, sentences, n=3):
     :param n: return number
     :return:
     """
+    src_s = [x for x in jieba.cut(src_s)]
     sentences = segmentation(sentences)
     temp = list()
     for item in sentences:
@@ -93,15 +99,3 @@ def merge(word_list):
     for w in word_list:
         s += w.split('/')[0]
     return s
-
-
-if __name__ == '__main__':
-    str1 = '我喜欢吃苹果'
-    str2 = '他喜欢肯红薯'
-    str1_seg = jieba.cut(str1)
-    str2_seg = jieba.cut(str2)
-    str1_new = [x for x in str1_seg]
-    str2_new = [x for x in str2_seg]
-    str_l = ['我喜欢吃梨', '你喜欢吃苹果', '他喜欢吃橙子']
-    print(get_similarity(str1_new, str2_new))
-    print(most_similar_items(str1, str_l, 5))

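Since most_similar_items now segments the query with jieba.cut itself, callers pass a raw string, while get_similarity still expects pre-segmented word lists. A usage sketch adapted from the removed __main__ demo (same example sentences):

import jieba
from sim.text_sim import get_similarity, most_similar_items

str1 = '我喜欢吃苹果'
str2 = '他喜欢肯红薯'
str_l = ['我喜欢吃梨', '你喜欢吃苹果', '他喜欢吃橙子']

# get_similarity takes segmented word lists
print(get_similarity(list(jieba.cut(str1)), list(jieba.cut(str2))))

# most_similar_items segments the query internally, so a raw string is enough
print(most_similar_items(str1, str_l, 3))
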
sim/words_sim.py (+7 -22)

@@ -1,23 +1,21 @@
 # -*- coding: utf-8 -*-
 
-import codecs
-import os
-import time
+import pkgutil
 
-base_path = os.path.abspath(__file__)
-folder = os.path.dirname(base_path)
-data_path = os.path.join(folder, 'cilin_dict.txt')
+cilin_data = pkgutil.get_data(__package__, 'cilin_dict.txt')
 
 
 class SimCilin(object):
 
     def __init__(self):
-        self.cilin_path = data_path
+        self.cilin_data = cilin_data
         self.sem_dict = self.load_semantic()
 
-    def load_semantic(self):
+    @staticmethod
+    def load_semantic():
         sem_dict = dict()
-        for line in codecs.open(self.cilin_path):
+        lines = cilin_data.decode().split('\n')
+        for line in lines:
             line = line.strip().split(' ')
             sem_type = line[0]
             words = line[1:]
@@ -54,16 +52,3 @@ class SimCilin(object):
                 elif index in [3, 4]:
                     score += 1
         return score / 10
-
-
-if __name__ == '__main__':
-    w1 = '歌手'
-    w2 = '演员'
-    ci_lin = SimCilin()
-    start = time.perf_counter()
-    v = 0.0
-    for i in range(20000):
-        v = ci_lin.compute_word_sim(w1, w2)
-    end = time.perf_counter()
-    print(end - start)
-    print(v)

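The words_sim.py change swaps the os.path/codecs.open file access for pkgutil.get_data, which resolves cilin_dict.txt through the package loader and therefore keeps working when sim is installed (or zipped) rather than run from a source checkout. The pattern in isolation, assuming the package name sim as used in the test import; the dictionary-building logic beyond what the diff shows is left out:

import pkgutil

# pkgutil.get_data resolves a file shipped inside a package and returns its bytes,
# or None if the resource cannot be found
raw = pkgutil.get_data('sim', 'cilin_dict.txt')
if raw is None:
    raise FileNotFoundError('cilin_dict.txt is not bundled with the sim package')

for line in raw.decode().split('\n'):
    parts = line.strip().split(' ')
    sem_type, words = parts[0], parts[1:]   # semantic code, then the words under it

Inside the module the diff uses __package__ instead of the literal name, which resolves to 'sim' once the module is imported as part of the package.
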
tests/test_text_sim.py (+1 -3)

@@ -4,9 +4,7 @@
 
 from sim.text_sim import most_similar_items
 
-
 if __name__ == '__main__':
-
     str1 = '我喜欢吃苹果'
-    str_l = ['我喜欢吃梨', '你喜欢吃苹果', '他喜欢吃橙子']
+    str_l = ['我喜欢吃梨', '你喜欢吃苹果', 'unbelievable']
     print(most_similar_items(str1, str_l, 5))