Browse Source

Support meeting info extraction and add the related services

highing666 4 years ago
parent
commit
025ab4baac

+ 12 - 5
app/api/routers/nlp.py

@@ -7,9 +7,16 @@ router = APIRouter()
 
 
 @router.get('/meeting/info', response_model=MeetingInfoResponse)
-async def catch_meeting_info(sentence: str = Query(..., max_length=50)):
-    await get_caught_result(sentence)
-
-    return {
-        'Message': 'success'
+async def catch_meeting_info(sentence: str = Query(..., max_length=100)):
+    start_time, end_time, duration, room_size, topic, name_list = await get_caught_result(sentence)
+    response = {
+        'Message': 'success',
+        'AcceptableStartTime': start_time,
+        'AcceptableEndTime': end_time,
+        'MeetingDurationSeconds': duration,
+        'MeetingRoomSize': room_size,
+        'Topic': topic,
+        'Participants': name_list
     }
+
+    return response

+ 69 - 20
app/controllers/nlp/meeting.py

@@ -1,33 +1,82 @@
-import json
-
-from typing import List
+from typing import Dict, List, Tuple
 
+from httpx import AsyncClient
 from loguru import logger
-from tencentcloud.nlp.v20190408 import nlp_client, models
 
-from app.services.tencent_nlp import get_tencent_nlp_client
+from app.services.duckling import Duckling
+from app.services.tencent_nlp import TencentNLP
 
 
 class MeetingInfoCatcher:
 
-    def __init__(self, client: nlp_client.NlpClient, sentence: str):
+    def __init__(self, nlp_service: TencentNLP, duckling: Duckling):
         super(MeetingInfoCatcher, self).__init__()
-        self.client = client
-        self.sentence = sentence
+        self.nlp_service = nlp_service
+        self.duckling = duckling
+
+    async def extract_time(self, sentence: str) -> Tuple[str, str, int]:
+        start_time, end_time, duration = '', '', -1
+        parsed = await self.duckling.parse(sentence)
+        for dim in parsed:
+            if dim['dim'] == 'time':
+                start_time = dim['value']['from']['value']
+                end_time = dim['value']['to']['value']
+            if dim['dim'] == 'duration':
+                duration = dim['value']['normalized']['value']
+
+        return start_time, end_time, duration
+
+    async def extract_room_size(self, sentence: str) -> str:
+        dp_tokens = await self.nlp_service.get_dependency(sentence)
+        size = ''
+        for token in dp_tokens:
+            if await self.nlp_service.get_word_similarity(token.Word, '会议室') > 0.8:
+                index = token.Id
+                for item in dp_tokens:
+                    if item.HeadId == index:
+                        logger.debug(item)
+                        if await self.nlp_service.get_word_similarity(item.Word, '小') > 0.9:
+                            size = 'small'
+                        if await self.nlp_service.get_word_similarity(item.Word, '中') > 0.9:
+                            size = 'medium'
+                        if await self.nlp_service.get_word_similarity(item.Word, '大') > 0.9:
+                            size = 'large'
+                break
+
+        return size
 
-    def run(self):
-        req = models.LexicalAnalysisRequest()
-        params = {
-            'Text': self.sentence
-        }
-        req.from_json_string(json.dumps(params))
+    async def extract_topic(self, sentence: str) -> str:
+        summarization = await self.nlp_service.get_auto_summarization_result(sentence)
 
-        resp = self.client.LexicalAnalysis(req)
-        logger.debug(resp)
+        return summarization
+
+    async def extract_name(self, sentence: str) -> List[str]:
+        _, ner_tokens = await self.nlp_service.get_lexical_analysis_result(sentence)
+        name_list = []
+        for token in ner_tokens:
+            if token.Type == 'PER':
+                name_list.append(token.Word)
+
+        return name_list
+
+    async def run(self, sentence: str) -> Tuple:
+        similarity = await self.nlp_service.get_text_similarity_result('我要开会', [sentence])
+        if similarity[-1].Score < 0.5:
+            return '', '', -1, '', '', []
+        else:
+            start_time, end_time, interval = await self.extract_time(sentence)
+            topic = await self.extract_topic(sentence)
+            name_list = await self.extract_name(sentence)
+            room_size = await self.extract_room_size(sentence)
+
+            return start_time, end_time, interval, room_size, topic, name_list
 
 
 @logger.catch()
-async def get_caught_result(sentence: str):
-    client = get_tencent_nlp_client()
-    catcher = MeetingInfoCatcher(client, sentence)
-    catcher.run()
+async def get_caught_result(sentence: str) -> Tuple:
+    async with AsyncClient() as client:
+        duckling = Duckling(client)
+        service = TencentNLP()
+
+        catcher = MeetingInfoCatcher(service, duckling)
+        return await catcher.run(sentence)

+ 2 - 0
app/core/config.py

@@ -18,6 +18,8 @@ class Settings(BaseSettings):
     TENCENT_SECRET_ID_V1: str
     TENCENT_SECRET_KEY_V1: str
 
+    DUCKLING_HOST: AnyHttpUrl
+
     PROJECT_DIR: DirectoryPath
     LOGS_DIR: DirectoryPath
 

+ 1 - 1
app/models/domain/nlp.py

@@ -17,7 +17,7 @@ class NLPResponseBase(BaseModel):
 class MeetingInfoResponse(NLPResponseBase):
     AcceptableStartTime: Optional[str]
     AcceptableEndTime: Optional[str]
-    MeetingTimeDelta: Optional[int]
+    MeetingDurationSeconds: Optional[int]
     MeetingRoomSize: Optional[RoomSize]
     Topic: Optional[str]
     Participants: Optional[List[str]]

+ 28 - 0
app/services/duckling.py

@@ -0,0 +1,28 @@
+from typing import Dict
+
+from httpx import AsyncClient, URL
+
+from app.core.config import settings
+from app.services.service import api_exception
+
+
+class Duckling:
+    """
+    Duckling is a Haskell library that parses text into structured data.
+    """
+
+    def __init__(self, client: AsyncClient, server_settings=settings):
+        super(Duckling, self).__init__()
+        self._client = client
+        self._host = URL(server_settings.DUCKLING_HOST)
+
+    @api_exception
+    async def parse(self, text: str, locale: str = 'zh_CN') -> Dict:
+        url = self._host.join('parse')
+        data = {
+            'locale': locale,
+            'text': text
+        }
+        raw_response = await self._client.post(url, data=data)
+
+        return raw_response.json()

+ 73 - 1
app/services/tencent_nlp.py

@@ -1,10 +1,13 @@
+import json
+from typing import Dict, List, Tuple
+
 from loguru import logger
 
 from tencentcloud.common import credential
 from tencentcloud.common.profile.client_profile import ClientProfile
 from tencentcloud.common.profile.http_profile import HttpProfile
 from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
-from tencentcloud.nlp.v20190408 import nlp_client
+from tencentcloud.nlp.v20190408 import models, nlp_client
 
 from app.core.config import settings
 
@@ -23,3 +26,72 @@ def get_tencent_nlp_client() -> nlp_client.NlpClient:
         return client
     except TencentCloudSDKException as e:
         logger.error(e)
+
+
+class TencentNLP:
+
+    def __init__(self):
+        cred = credential.Credential(settings.TENCENT_SECRET_ID_V1, settings.TENCENT_SECRET_KEY_V1)
+        http_profile = HttpProfile()
+        http_profile.reqMethod = 'GET'
+        http_profile.endpoint = settings.TENCENT_NLP_ENDPOINT
+
+        client_profile = ClientProfile()
+        client_profile.httpProfile = http_profile
+        client = nlp_client.NlpClient(cred, 'ap-guangzhou', client_profile)
+        self.client = client
+
+    async def get_lexical_analysis_result(self, text: str) -> Tuple[Dict, Dict]:
+        req = models.LexicalAnalysisRequest()
+        params = {
+            'Text': text
+        }
+        req.from_json_string(json.dumps(params))
+        resp = self.client.LexicalAnalysis(req)
+
+        return resp.PosTokens, resp.NerTokens
+
+    async def get_auto_summarization_result(self, text: str) -> str:
+        req = models.AutoSummarizationRequest()
+        params = {
+            'Text': text
+        }
+        req.from_json_string(json.dumps(params))
+        resp = self.client.AutoSummarization(req)
+
+        return resp.Summary
+
+    async def get_text_similarity_result(self, src_text: str, target_text: List[str]) -> List:
+        req = models.TextSimilarityRequest()
+        params = {
+            'SrcText': src_text,
+            'TargetText': target_text
+        }
+        req.from_json_string(json.dumps(params))
+        resp = self.client.TextSimilarity(req)
+
+        return resp.Similarity
+
+    async def get_dependency(self, text: str) -> List:
+        req = models.DependencyParsingRequest()
+        params = {
+            'Text': text
+        }
+        req.from_json_string(json.dumps(params))
+        resp = self.client.DependencyParsing(req)
+
+        return resp.DpTokens
+
+    async def get_word_similarity(self, src_word: str, target: str) -> float:
+        try:
+            req = models.WordSimilarityRequest()
+            params = {
+                'SrcWord': src_word,
+                'TargetWord': target
+            }
+            req.from_json_string(json.dumps(params))
+            resp = self.client.WordSimilarity(req)
+        except TencentCloudSDKException:
+            return 0
+
+        return resp.Similarity