Skip to main content

AutoGen

1.1 Usage

Method 1: OpenAIChatCompletionClient (Recommended)

from autogen_ext.models.openai import OpenAIChatCompletionClient

# OpenAI-compatible client pointed at the HPC-AI inference endpoint.
client = OpenAIChatCompletionClient(
    model="minimax/minimax-m2.5",
    base_url="https://api.hpc-ai.com/inference/v1",
    api_key="your-hpc-ai-api-key",
    # Capability flags for a model autogen-ext does not know about.
    model_info={
        "vision": False,
        "function_calling": True,
        "json_output": True,
        # Fix vs. original: recent autogen-ext releases also require a
        # "family" entry in model_info; "unknown" is the documented fallback.
        "family": "unknown",
    },
)

# Use the client in an agent.
# Fix vs. original: AssistantAgent is exported from autogen_agentchat.agents,
# not from the autogen_agentchat package root; the original import fails.
from autogen_agentchat.agents import AssistantAgent

agent = AssistantAgent(
    name="assistant",
    model_client=client,
)

Method 2: config_list (Legacy)

import autogen

# Legacy (AutoGen 0.2 / pyautogen) configuration.
# Fix vs. original: with openai>=1 the config-list key is "base_url";
# "api_base" is the pre-1.0 openai-library name and is not accepted by 0.2.
llm_config = {
    "config_list": [
        {
            "model": "minimax/minimax-m2.5",
            "base_url": "https://api.hpc-ai.com/inference/v1",
            "api_key": "your-hpc-ai-api-key",
            "api_type": "openai",
        }
    ]
}

assistant = autogen.AssistantAgent(
    name="assistant",
    llm_config=llm_config,
)

Method 3: Custom Model Client (Advanced)

from autogen_core.models import ChatCompletionClient, CreateResult, LLMMessage, ModelInfo

class CustomModelClient(ChatCompletionClient):
    """Minimal chat-completion client for an OpenAI-compatible HTTP endpoint.

    NOTE(review): ChatCompletionClient likely declares more abstract members
    (streaming, usage accounting, close) than are implemented here — confirm
    against the installed autogen_core version before using in production.
    """

    def __init__(self, api_key, base_url, model, **kwargs):
        self.api_key = api_key
        self.base_url = base_url
        self.model = model
        # Static capability advertisement; adjust per the target model.
        self._model_info = ModelInfo(
            vision=False, function_calling=True, json_output=True
        )

    @property
    def model_info(self) -> ModelInfo:
        """Capabilities reported to agents that inspect this client."""
        return self._model_info

    async def create(self, messages: list[LLMMessage], **kwargs) -> CreateResult:
        """POST *messages* to ``{base_url}/chat/completions`` and wrap the reply.

        Fixes vs. original:
        - ``List`` was used in the annotation without being imported, which
          raises ``NameError`` when the class body is evaluated; replaced with
          the builtin generic ``list``.
        - ``requests.post`` is blocking; inside an ``async def`` it would stall
          the event loop, so it now runs in a worker thread.
        - HTTP errors are surfaced via ``raise_for_status`` instead of a
          confusing ``KeyError`` when indexing the error payload.
        """
        import asyncio
        import requests

        headers = {"Authorization": f"Bearer {self.api_key}"}
        payload = {
            "model": self.model,
            # assumes each LLMMessage exposes .role/.content — TODO confirm
            # against the installed autogen_core message types.
            "messages": [
                {"role": m.role, "content": m.content} for m in messages
            ],
        }

        response = await asyncio.to_thread(
            requests.post,
            f"{self.base_url}/chat/completions",
            headers=headers,
            json=payload,
        )
        response.raise_for_status()
        result = response.json()

        choice = result["choices"][0]
        return CreateResult(
            content=choice["message"]["content"],
            finish_reason=choice.get("finish_reason", "stop"),
            usage=result.get("usage", {}),
        )

# Instantiate the custom client against the HPC-AI OpenAI-compatible endpoint.
client = CustomModelClient(
    api_key="your-hpc-ai-api-key",
    base_url="https://api.hpc-ai.com/inference/v1",
    model="minimax/minimax-m2.5",
)

1.2 Troubleshooting

| Common Issue | Solution |
| --- | --- |
| Version compatibility | AutoGen 0.4+ uses `autogen_ext.models.openai`; AutoGen 0.2 uses the legacy `config_list` style |
| `model_info` errors | Set `vision`, `function_calling`, `json_output` according to the model's capabilities |
| async/await errors | The 0.4+ API is async — `await` the client and agent calls |

1.3 References