瀏覽代碼

根据词林的字符串相似度匹配函数

jxing 5 年之前
父節點
當前提交
36c4663a9b

+ 0 - 0
src/str_similar/__init__.py


文件差異過大導致無法顯示
+ 17884 - 0
src/str_similar/cilin_dict.txt


文件差異過大導致無法顯示
+ 82193 - 0
src/str_similar/cilin_words.txt


+ 119 - 0
src/str_similar/str_similar.py

@@ -0,0 +1,119 @@
+
+from shapely.geometry import Polygon
+import json
+
+
# Dict keys used when reading space / ispace rows in this module.
column_space_id = 'space_id'  # business-space id
column_outline = 'outline'    # JSON-encoded outline (list of sub-outlines)
column_floor_id = 'floor_id'  # floor the space belongs to (not read below)
column_revit_id = 'revit_id'  # Revit element id of an ispace
+
+
+# 获取Polygon对象
# Build a shapely Polygon object from one ring of point dicts
def get_polygon(single_poly):
    """Convert a list of {"X": .., "Y": ..} point dicts into a shapely Polygon.

    :param single_poly: sequence of point dicts forming one ring
    :return: shapely.geometry.Polygon
    """
    return Polygon([(point["X"], point["Y"]) for point in single_poly])
+
+# 在polygon1包含polygon2的时候, 检测是否polygon1内的空洞也包含polygon2
# When polygon1's outer ring contains poly2, check whether one of polygon1's
# holes swallows poly2 (in which case they do not really overlap).
def is_include(polygon1, poly2):
    """Return False when a hole of *polygon1* fully contains (or equals) *poly2*.

    polygon1[0] is the outer ring; polygon1[1:] are hole rings.
    A hole merely overlapping poly2 still counts as a real overlap (True).
    """
    for hole_ring in polygon1[1:]:
        hole = get_polygon(hole_ring)
        if hole.overlaps(poly2):
            return True
        if hole.equals(poly2) or hole.contains(poly2):
            return False
    return True
+
def is_sub_outline_overlap(polygon1, polygon2):
    """Return True when the outer rings of two sub-outlines share area.

    polygonN is a list of rings; ring 0 is the outer boundary, the rest are
    holes (checked via is_include when one boundary contains the other).

    Cleanup: the original repeated ``poly1.equals(poly2)`` in the containment
    conditions, but that case already returned True at the first check, so
    the repeated tests were unreachable and have been removed (behavior is
    unchanged).
    """
    poly1 = get_polygon(polygon1[0])
    poly2 = get_polygon(polygon2[0])
    if poly1.overlaps(poly2) or poly1.equals(poly2):
        return True
    if poly1.contains(poly2):
        return is_include(polygon1, poly2)
    if poly2.contains(poly1):
        return is_include(polygon2, poly1)
    return False
+
+# 是否面积有重叠
+def is_overlap(polygon1, ispace_polygon):
+    length1 = len(polygon1)
+    length2 = len(ispace_polygon)
+    if length1 == 0 or length2 == 0:
+        return False
+
+    for i in range(length1):
+        for j in range(length2):
+            if is_sub_outline_overlap(polygon1[i], ispace_polygon):
+                return True
+    return False
+
+# 根据业务空间轮廓和元空间轮廓是否有重叠部分来判断关系
+# 返回dict, 格式 {space_id  --> {ispace_id}}
+def build_rel_space_ispace(space_data, ispace_data):
+    rel_dict = {}
+    for space in space_data:
+        space_id = space.get(column_space_id)
+        for ispace in ispace_data:
+            space_outline = json.loads(space.get(column_outline))
+            ispace_outline = json.loads(ispace.get(column_outline))
+            if is_overlap(space_outline, ispace_outline):
+                if space_id not in rel_dict:
+                    rel_dict[space_id] = set()
+                revit_set = rel_dict.get(space_id)
+                revit_set.add(ispace.get(column_revit_id))
+    return rel_dict
+
+# 返回被删除的元空间的revit_id
+def get_deleted_ispace_revit_id(new_ispace_data, prev_ispace_data):
+    deleted = []
+    prev_ispace_revit_id_set = set()
+    for prev_ispace in prev_ispace_data:
+        prev_ispace_revit_id_set.add(prev_ispace.get(column_revit_id))
+    for new_ispace in new_ispace_data:
+        prev_id = new_ispace.get(column_revit_id)
+        if prev_id in prev_ispace_revit_id_set:
+            prev_ispace_revit_id_set.remove(prev_id)
+    deleted.extend(prev_ispace_revit_id_set)
+    return deleted
+
+# 返回被修改的元空间的revit_id
+def get_updated_ispace_revit_id(new_ispace_data, prev_ispace_data):
+    updated = []
+    new_ispace_dict = {}
+    prev_ispace_dict = {}
+    for prev_ispace in prev_ispace_data:
+        prev_id = prev_ispace.get(column_revit_id)
+        prev_outline = json.loads(prev_ispace.get(column_revit_id))
+        prev_ispace_dict[prev_id] = prev_outline
+    for new_ispace in new_ispace_data:
+        new_id = new_ispace.get(column_revit_id)
+        new_outline = json.loads(new_ispace.get(column_revit_id))
+        new_ispace_dict[new_id] = new_outline
+    for prev_id, prev_outline in prev_ispace_dict.items():
+        if prev_id in new_ispace_dict:
+            new_outline = new_ispace_dict.get(prev_id)
+            prev_poly = get_polygon(prev_outline[0])
+            new_poly = get_polygon(new_outline[0])
+            if not prev_poly.equals(new_poly):
+                updated.append(prev_id)
+    return updated
+
+# 获取受影响的业务空间的id数组
+# space_data是可能受影响的业务空间的数据, new_ispace_data 是新模型的元空间数据, prev_ispace_data是上一个模型的元空间数据
+def get_affected_spaced(space_data, new_ispace_data, prev_ispace_data):
+    affected_spaces = []
+    space_ispace_rel = build_rel_space_ispace(space_data, prev_ispace_data)
+    affected_ispace_revit_id = get_deleted_ispace_revit_id(new_ispace_data, prev_ispace_data)
+    affected_ispace_revit_id.extend(get_updated_ispace_revit_id(new_ispace_data, prev_ispace_data))
+    for space_id, ispace_id_dict in space_ispace_rel.items():
+        for revit_id in affected_ispace_revit_id:
+            if revit_id in ispace_id_dict:
+                affected_spaces.append(space_id)
+                break
+    return affected_spaces

+ 106 - 0
src/str_similar/test.py

@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+import json
+
+import psycopg2
+from shapely.geometry import Polygon
+import sys
+
+from src.grid.check_grid import check_grid_upright
+
# Column order of the rows returned by the model query (zipped into dicts).
# NOTE(review): the name says "space" but the fields look like model/version
# metadata, and the commented-out code below refers to `involved_model_keys`
# — confirm the intended name.
involved_space = [
    'id',
    'floor_name',
    'project_id',
    'folder_id',
    'fid',
    'accept_time',
    'version',
    'note',
    'user_id',
    'user_name',
    'log',
    'url',
    'md5',
    'status'
]

# Column order of rows from the revit.grid table (zipped into dicts).
grid_keys = [
    'id',
    'model_id',
    'name',
    'type',
    'last_update',
    'create_time',
    'revit_id',
    'source_id',
    'location',
]
+
+
def get_data(sql):
    """Run *sql* against the datacenter PostgreSQL database and return all rows.

    :param sql: SQL text to execute
    :return: list of row tuples; empty list when connecting or querying fails

    Fixes over the original:
    - dropped the ``global connection, cursor`` misuse — both are plain locals;
    - ``connection``/``cursor`` initialized to None, so the ``finally`` block
      no longer raises NameError when ``connect()`` itself fails on the
      first call;
    - the cursor is closed whenever it was opened, not only when the
      connection object is truthy.
    NOTE(review): credentials are hard-coded; move to config/env.
    """
    record = []
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(
            database='datacenter',
            user='postgres',
            password='123456',
            host='192.168.20.234',
            port='5432'
        )
        cursor = connection.cursor()
        cursor.execute(sql)
        record = cursor.fetchall()
    except (Exception, psycopg2.Error) as error:
        print("Error while connecting to PostgreSQL", error)
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
            print('PostgreSQL connection is closed')
    return record
+
+
+def loads(x):
+    x['location'] = json.loads(x['location'])
+    return x
+
+
+def loads_curve(x):
+    x['curve'] = json.loads(x['curve'])
+    return x
+
+
if __name__ == '__main__':
    # Ad-hoc experiment: compare two squares where the second has an extra
    # collinear vertex at (0, 0.5), probing how shapely treats geometric vs
    # vertex-wise equality at various tolerances.
    points1 = [(0, 0), (0, 1), (1, 1), (1, 0)]
    points2 = [(0, 0), (0.00, 0.5), (0, 1), (1, 1), (1, 0)]
    poly1 = Polygon(points1)
    poly2 = Polygon(points2)

    # equals() is geometric equality, so the redundant vertex is ignored.
    print(poly1.equals(poly2))
    # almost_equals(other, decimal) compares at a decimal precision; the
    # negative value below is an out-of-range probe.
    # NOTE(review): Polygon.almost_equals is deprecated in Shapely 2.x in
    # favor of equals_exact — confirm the installed version.
    print(poly1.almost_equals(poly2, 0.99))
    print(poly1.almost_equals(poly2, 0))
    print(poly1.almost_equals(poly2, -10))
    print(poly1.almost_equals(poly2, 2))
    print(poly1.almost_equals(poly2, 0.0001))



    # Dead experiment kept for reference; note it references
    # involved_model_sql / involved_model_keys that are not defined above.
    # involved_space = "select * from  where folder_id = " \
    #                      "'bbe510dbe26011e999b69b669ea08505' and status in (3, 31, 4) "
    # grid_sql = "select * from revit.grid where model_id = "
    # columns_data = get_data(involved_model_sql)
    #
    # model_list = [dict(zip(involved_model_keys, item)) for item in columns_data]
    # if len(model_list) < 2:
    #     sys.exit()
    # grid_data = dict()
    # for item in model_list:
    #     current_grid_sql = grid_sql + '\'{model_id}\''.format(model_id=item.get('fid'))
    #     single_model_grid = get_data(current_grid_sql)
    #     single_model_grid = [dict(zip(grid_keys, item)) for item in single_model_grid]
    #     grid_data[item.get('fid')] = single_model_grid
    #
    # print(check_grid_upright(model_list, grid_data))


+ 110 - 0
src/str_similar/text_sim.py

@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+
+import pkgutil
+from io import StringIO
+from operator import itemgetter
+
+import jieba
+import numpy as np
+# from relations.src.str_similar import words_sim #SimCilin
+
+# data = pkgutil.get_data(__package__, '.cilin_words.txt')
+from src.str_similar.words_sim import SimCilin
+
# Load the bundled Cilin word list into jieba's user dictionary at import
# time so segmentation prefers these domain entries.
data = pkgutil.get_data('src.str_similar', 'cilin_words.txt')
jieba.load_userdict(StringIO(data.decode()))

# Module-level singleton; parses the Cilin dictionary once at import time.
ci_lin = SimCilin()
+
+
def segmentation(sentence_list):
    """
    Segment each sentence with jieba before similarity computation.
    :param sentence_list: iterable of sentence strings
    :return: list of token lists, one per sentence
    """
    return [list(jieba.cut(sentence)) for sentence in sentence_list]
+
+
def get_similarity(s1, s2):
    """
    Calculate the similarity of two segmented sentences via Cilin.

    For each Chinese token on one side, take its best Cilin score against
    every token on the other side; the result is the average of the two
    directional means.

    :param s1: word list (segmentation output)
    :param s2: word list (segmentation output)
    :return: float score; NOTE(review): numpy's mean of an empty list
             yields nan (with a warning) when a side has no Chinese token —
             preserved from the original, confirm callers expect it.

    Fixes over the original: the two copy-pasted directional loops are one
    helper; ``sort()`` + ``[-1]`` is ``max(..., default=0)``, which also
    avoids the original's IndexError when the other side is empty.
    """

    def _best_scores(words, others):
        # Best Cilin similarity of each Chinese word against the other side.
        return [
            max((ci_lin.compute_word_sim(w1, w2) for w2 in others), default=0)
            for w1 in words
            if is_contains_chinese(w1)
        ]

    return (np.mean(_best_scores(s1, s2)) + np.mean(_best_scores(s2, s1))) / 2
+
+
def most_similar_items(src_s, sentences, n=3):
    """
    Return the n sentences most similar to the target.
    :param src_s: target sentence
    :param sentences: candidate sentence strings
    :param n: number of results to return
    :return: list of {'key': sentence, 'value': score} dicts, best first

    NOTE(review): src_s is handed to get_similarity unsegmented, so it is
    compared character by character while candidates are word-segmented —
    confirm this asymmetry is intended.
    """
    scored = [
        {
            'key': merge(tokens),
            'value': get_similarity(src_s, tokens),
        }
        for tokens in segmentation(sentences)
    ]
    scored.sort(key=itemgetter('value'), reverse=True)
    return scored[:n]
+
+
def is_contains_chinese(s):
    """
    Tell whether the string contains at least one Chinese character
    (code points U+4E00 through U+9FA5).
    :param s: string to inspect
    :return: bool
    """
    return any('\u4e00' <= ch <= '\u9fa5' for ch in s)
+
+
def merge(word_list):
    """Concatenate the surface form (text before any '/') of each token.

    :param word_list: list of tokens, possibly tagged as 'word/pos'
    :return: joined string of the bare words
    """
    return ''.join(token.split('/')[0] for token in word_list)
+
+
if __name__ == '__main__':
    # Ad-hoc demo: sentence-pair similarity, then top-2 most similar picks.
    str1 = '我喜欢吃苹果'
    str2 = '他喜欢肯红薯'  # NOTE(review): '肯' looks like a typo for '啃' — demo data only
    str1_seg = jieba.cut(str1)
    str2_seg = jieba.cut(str2)
    str1_new = [x for x in str1_seg]
    str2_new = [x for x in str2_seg]
    str_l = ['我喜欢吃梨', '你喜欢吃苹果', '他喜欢吃橙子']
    print(get_similarity(str1_new, str2_new))
    print(most_similar_items(str1, str_l, 2))

+ 69 - 0
src/str_similar/words_sim.py

@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+import codecs
+import os
+import time
+
# Resolve cilin_dict.txt relative to this module's own directory so the
# data file is found regardless of the current working directory.
base_path = os.path.abspath(__file__)
folder = os.path.dirname(base_path)
data_path = os.path.join(folder, 'cilin_dict.txt')
+
+
class SimCilin(object):
    """Word-similarity scorer based on Cilin (同义词词林) semantic codes."""

    def __init__(self):
        # data_path is resolved at module import time, next to this file.
        self.cilin_path = data_path
        self.sem_dict = self.load_semantic()

    def load_semantic(self):
        """Parse the Cilin file into {word: [semantic codes]}.

        Each line is "<code> <word> <word> ..."; a word may appear under
        several codes, so values are lists.

        Fix over the original: the file handle from ``codecs.open`` was
        iterated but never closed; a ``with`` block now guarantees closing.
        The join-with-';'-then-split dance is replaced by direct list
        appends with the same result.
        """
        sem_dict = {}
        with open(self.cilin_path, encoding='utf-8') as cilin_file:
            for line in cilin_file:
                parts = line.strip().split(' ')
                sem_type = parts[0]
                for word in parts[1:]:
                    sem_dict.setdefault(word, []).append(sem_type)
        return sem_dict

    def compute_word_sim(self, word1, word2):
        """Best pairwise code similarity between two words; 0 when either
        word is absent from the dictionary."""
        sems_word1 = self.sem_dict.get(word1, [])
        sems_word2 = self.sem_dict.get(word2, [])
        score_list = [
            self.compute_sem(sem1, sem2)
            for sem1 in sems_word1
            for sem2 in sems_word2
        ]
        return max(score_list) if score_list else 0

    @staticmethod
    def compute_sem(sem1, sem2):
        """Similarity of two Cilin codes, scaled to [0, 1].

        A code is split into six hierarchical segments; each independently
        matching segment adds its weight (deeper levels weigh less). The
        final flag character is compared but carries no weight, exactly as
        in the original scoring.
        """
        weights = (3, 3, 2, 1, 1, 0)
        segs1 = (sem1[0], sem1[1], sem1[2:4], sem1[4], sem1[5:7], sem1[-1])
        segs2 = (sem2[0], sem2[1], sem2[2:4], sem2[4], sem2[5:7], sem2[-1])
        score = sum(w for a, b, w in zip(segs1, segs2, weights) if a == b)
        return score / 10
+
+
if __name__ == '__main__':
    # Micro-benchmark: score the same word pair 20,000 times and print the
    # elapsed wall-clock time and the (constant) similarity value.
    w1 = '歌手'
    w2 = '演员'
    ci_lin = SimCilin()
    start = time.perf_counter()
    v = 0.0
    for i in range(20000):
        v = ci_lin.compute_word_sim(w1, w2)
    end = time.perf_counter()
    print(end - start)
    print(v)