conductor-checkpoint-msg_01AVxUgejEA9piS6narw4omz

This commit is contained in:
Daniel Chalef 2025-10-30 17:30:36 -07:00
parent 8e44bec395
commit ef8507a9df
2 changed files with 10 additions and 17 deletions

View file

@@ -214,22 +214,15 @@ class AnthropicClient(LLMClient):
try:
# Create the appropriate tool based on whether response_model is provided
tools, tool_choice = self._create_tool(response_model)
# Build the message creation parameters
create_params: dict[str, typing.Any] = {
'system': system_message.content,
'max_tokens': max_creation_tokens,
'messages': user_messages_cast,
'model': self.model,
'tools': tools,
'tool_choice': tool_choice,
}
# Only include temperature if it's not None
if self.temperature is not None:
create_params['temperature'] = self.temperature
result = await self.client.messages.create(**create_params)
result = await self.client.messages.create(
system=system_message.content,
max_tokens=max_creation_tokens,
temperature=self.temperature,
messages=user_messages_cast,
model=self.model,
tools=tools,
tool_choice=tool_choice,
)
# Extract the tool output from the response
for content_item in result.content:

View file

@@ -39,7 +39,7 @@ class LLMConfig:
api_key: str | None = None,
model: str | None = None,
base_url: str | None = None,
temperature: float | None = None,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_MAX_TOKENS,
small_model: str | None = None,
):