Replies: 1 comment
CodexGraph fails with `ValueError: Invalid model cfg`. Debugging along the stack trace, the error originates in the construction of `CodexGraphAgentChat`:

```python
def get_agent(self):
    graph_db = self.get_graph_db(
        st.session_state.shared['setting']['project_id'])
    if not graph_db:
        return None
    llm_config = get_llm_config(
        st.session_state.shared['setting']['llm_model_name'])
    max_iterations = int(
        st.session_state.shared['setting']['max_iterations'])
    prompt_path = str(
        Path(st.session_state.shared['setting']['prompt_path']).joinpath(
            'code_chat'))
    schema_path = str(
        Path(st.session_state.shared['setting']['prompt_path']).joinpath(
            'graph_database'))
    try:
        agent = CodexGraphAgentChat(  # <---------- error raised here
            llm=llm_config,
            prompt_path=prompt_path,
            schema_path=schema_path,
            task_id=st.session_state.shared['setting']['project_id'],
            graph_db=graph_db,
            max_iterations=max_iterations,
            message_callback=self.create_update_message())
```

Stepping further down, this reaches the constructor of `modelscope_agent.agent.Agent`:

```python
class Agent(ABC):
    function_map: dict = {
    }  # used to record all the tools' instance, moving here to avoid `del` method crash.

    def __init__(self,
                 function_list: Union[Dict, List[Union[str, Dict]],
                                      None] = None,
                 llm: Optional[Union[Dict, BaseChatModel]] = None,
                 storage_path: Optional[str] = None,
                 name: Optional[str] = None,
                 description: Optional[str] = None,
                 instruction: Union[str, dict] = None,
                 use_tool_api: bool = False,
                 callbacks: list = None,
                 openapi_list: Optional[List[Union[str, Dict]]] = None,
                 **kwargs):
        """
        init tools/llm/instruction for one agent

        Args:
            function_list: A list of tools
                (1) When str: tool names
                (2) When Dict: tool cfg
            llm: The llm config of this agent
                (1) When Dict: set the config of llm as {'model': '', 'api_key': '', 'model_server': ''}
                (2) When BaseChatModel: llm is sent by another agent
            storage_path: If not specified otherwise, all data will be stored here in KV pairs by memory
            name: the name of agent
            description: the description of agent, which is used for multi_agent
            instruction: the system instruction of this agent
            use_tool_api: whether to use the tool service api, else to use the tool cls instance
            callbacks: the callbacks that could be used during different phase of agent loop
            openapi_list: the openapi list for remote calling only
            kwargs: other potential parameters
        """
        if isinstance(llm, Dict):
            self.llm_config = llm
            self.llm = get_chat_model(cfg=self.llm_config)  # <------ raises the ValueError
```

Since `llm` is passed as a dict, `modelscope_agent.utils.qwen_agent.base.get_chat_model` is the call that ultimately raises the `ValueError`:

```python
def get_chat_model(cfg: Union[dict, str] = 'qwen-plus') -> BaseChatModel:
"""The interface of instantiating LLM objects.
Args:
cfg: The LLM configuration, one example is:
cfg = {
# Use the model service provided by DashScope:
'model': 'qwen-max',
'model_server': 'dashscope',
# Use your own model service compatible with OpenAI API:
# 'model': 'Qwen',
# 'model_server': 'http://127.0.0.1:7905/v1',
# (Optional) LLM hyper-parameters:
'generate_cfg': {
'top_p': 0.8,
'max_input_tokens': 6500,
'max_retries': 10,
}
}
Returns:
LLM object.
"""
if isinstance(cfg, str):
cfg = {'model': cfg}
if 'model_type' in cfg:
model_type = cfg['model_type']
if model_type in LLM_REGISTRY:
if model_type in ('oai', 'qwenvl_oai'):
if cfg.get('model_server', '').strip() == 'dashscope':
cfg = copy.deepcopy(cfg)
cfg['model_server'] = 'https://dashscope.aliyuncs.com/compatible-mode/v1'
return LLM_REGISTRY[model_type](cfg)
else:
raise ValueError(
f'Please set model_type from {str(LLM_REGISTRY.keys())}')
# Deduce model_type from model and model_server if model_type is not provided:
if 'azure_endpoint' in cfg:
model_type = 'azure'
cfg['model_type'] = model_type
return LLM_REGISTRY[model_type](cfg)
if 'model_server' in cfg:
if cfg['model_server'].strip().startswith('http'):
model_type = 'openai_fn_call'
cfg['model_type'] = model_type
return LLM_REGISTRY[model_type](cfg)
model = cfg.get('model', '')
if '-vl' in model.lower():
model_type = 'qwenvl_dashscope'
cfg['model_type'] = model_type
return LLM_REGISTRY[model_type](cfg)
if '-audio' in model.lower():
model_type = 'qwenaudio_dashscope'
cfg['model_type'] = model_type
return LLM_REGISTRY[model_type](cfg)
if 'qwen' in model.lower():
model_type = 'qwen_dashscope'
cfg['model_type'] = model_type
return LLM_REGISTRY[model_type](cfg)
raise ValueError(f'Invalid model cfg: {cfg}') 而通过网页配置得到的model cfg只可能是以下两种,而这两种在get_chat_model方法中必定触发ValueError def get_llm_config(llm_name):
    if llm_name == 'deepseek-coder':
        llm_config = {
            'model': 'deepseek-coder',
            'api_base': 'https://api.deepseek.com',
            'model_server': 'openai'
        }
    elif llm_name == 'gpt-4o':
        llm_config = {
            'model': 'gpt-4o-2024-05-13',
            'api_base': 'https://api.openai.com/v1',
            'model_server': 'openai'
        }
    else:
        return None
    return llm_config
```
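Why both configs fail: neither carries a `model_type` or an `azure_endpoint`, their `model_server` value `'openai'` does not start with `http`, and neither model name contains `-vl`, `-audio`, or `qwen`, so every branch of `get_chat_model` is skipped and the final `raise` fires. The sketch below reproduces this directly and shows two config shapes that, judging only from the dispatch logic above, would at least reach the registered `openai_fn_call` loader; the import path, the `api_key` field, and whether the rest of CodexGraph accepts such a config are assumptions that have not been verified.

```python
# Minimal reproduction sketch; import path assumed from the stack trace above.
from modelscope_agent.utils.qwen_agent.base import get_chat_model

broken_cfg = {
    'model': 'deepseek-coder',
    'api_base': 'https://api.deepseek.com',
    'model_server': 'openai',   # does not start with 'http', so no branch matches
}
try:
    get_chat_model(cfg=broken_cfg)
except ValueError as err:
    print(err)                  # -> Invalid model cfg: {...}

# Hypothetical workarounds that at least pass get_chat_model's dispatch:
explicit_type_cfg = {
    'model': 'deepseek-coder',
    'model_type': 'openai_fn_call',               # key known to exist in LLM_REGISTRY
    'model_server': 'https://api.deepseek.com',   # OpenAI-compatible endpoint
    'api_key': '<DEEPSEEK_API_KEY>',
}
http_server_cfg = {
    'model': 'gpt-4o-2024-05-13',
    'model_server': 'https://api.openai.com/v1',  # startswith('http') -> openai_fn_call
    'api_key': '<OPENAI_API_KEY>',
}
```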
Title: Error Occurs with Both GPT and DeepSeek Keys Post-Deployment - `LLM_REGISTRY` Only Shows `openai_fn_call`
Description:
After completing the deployment and build process, I'm encountering an error regardless of whether I use a GPT API key or a DeepSeek API key. This makes me suspect a configuration issue on my end.
Debugging Findings:
Upon investigation, I've found that the global variable `LLM_REGISTRY` only has `TextChatAtOAI` (of type `openai_fn_call`) registered. Other LLM types (like DeepSeek) don't seem to be registered, which might be causing the issue. Could this be due to a misconfiguration on my part?
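For anyone reproducing this, the snippet below is roughly how the registry contents can be checked; the exact module holding `LLM_REGISTRY` is an assumption based on where `get_chat_model` lives and may differ in your install.

```python
# Inspect which LLM loaders are registered; module path assumed, adjust if needed.
from modelscope_agent.utils.qwen_agent import base

for model_type, cls in base.LLM_REGISTRY.items():
    print(model_type, '->', cls.__name__)
# Here this prints only: openai_fn_call -> TextChatAtOAI
```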
Log Output:
Question:
Is there a specific configuration step I might have missed to ensure other LLM providers (like DeepSeek) are correctly registered in `LLM_REGISTRY`? Any guidance would be appreciated.