Refactor: Update LLM stream response type to Generator (#9420)
### What problem does this PR solve? Change return type of _generate_streamly from str to Generator[str, None, None] to properly type hint streaming responses. ### Type of change - [x] Refactoring
This commit is contained in:
parent
e845d5f9f8
commit
d7b4e84cda
1 changed file with 2 additions and 2 deletions
|
|
@ -17,7 +17,7 @@ import json
|
|||
import logging
|
||||
import os
|
||||
import re
|
||||
from typing import Any
|
||||
from typing import Any, Generator
|
||||
|
||||
import json_repair
|
||||
from copy import deepcopy
|
||||
|
|
@ -154,7 +154,7 @@ class LLM(ComponentBase):
|
|||
return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)
|
||||
return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)
|
||||
|
||||
def _generate_streamly(self, msg:list[dict], **kwargs) -> str:
|
||||
def _generate_streamly(self, msg:list[dict], **kwargs) -> Generator[str, None, None]:
|
||||
ans = ""
|
||||
last_idx = 0
|
||||
endswith_think = False
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue