AutoGen
1.1 Usage
Method 1: OpenAI-Compatible Endpoint (Recommended for AutoGen 0.4+, which provides `autogen_ext`)
from autogen_ext.models.openai import OpenAIChatCompletionClient

# Point the OpenAI-compatible client at the HPC-AI inference endpoint.
client = OpenAIChatCompletionClient(
    model="minimax/minimax-m2.5",
    base_url="https://api.hpc-ai.com/inference/v1",
    api_key="your-hpc-ai-api-key",
    # Capabilities AutoGen should assume for this model; adjust per model.
    model_info={
        "vision": False,
        "function_calling": True,
        "json_output": True,
    },
)
# Use in Agent.
# FIX: in AutoGen 0.4+ AssistantAgent lives in the `agents` submodule;
# `from autogen_agentchat import AssistantAgent` raises ImportError.
from autogen_agentchat.agents import AssistantAgent

agent = AssistantAgent(
    name="assistant",
    model_client=client,  # the OpenAIChatCompletionClient created above
)
Method 2: config_list (Legacy)
import autogen

# Dict-style configuration for the legacy AutoGen 0.2 API.
llm_config = {
    "config_list": [
        {
            "model": "minimax/minimax-m2.5",
            # FIX: AutoGen 0.2 renamed the endpoint key from "api_base"
            # (pyautogen < 0.2) to "base_url" -- "api_base" is silently
            # ignored by 0.2+ and requests fall through to api.openai.com.
            "base_url": "https://api.hpc-ai.com/inference/v1",
            "api_key": "your-hpc-ai-api-key",
            "api_type": "openai",
        }
    ]
}

assistant = autogen.AssistantAgent(
    name="assistant",
    llm_config=llm_config,
)
Method 3: Custom Model Client (Advanced)
from autogen_core.models import ChatCompletionClient, CreateResult, LLMMessage, ModelInfo


class CustomModelClient(ChatCompletionClient):
    """Minimal ChatCompletionClient that proxies to an OpenAI-compatible endpoint.

    NOTE(review): only `create` and `model_info` are implemented here; the
    ChatCompletionClient ABC declares additional members (streaming, token
    counting) -- confirm against your installed AutoGen version before use.
    """

    def __init__(self, api_key, base_url, model, **kwargs):
        self.api_key = api_key
        self.base_url = base_url  # e.g. "https://api.hpc-ai.com/inference/v1"
        self.model = model
        self._model_info = ModelInfo(
            vision=False, function_calling=True, json_output=True
        )

    @property
    def model_info(self) -> ModelInfo:
        """Capabilities advertised to AutoGen agents."""
        return self._model_info

    async def create(self, messages: list[LLMMessage], **kwargs) -> CreateResult:
        """POST `messages` to {base_url}/chat/completions and wrap the reply.

        Fixes over the original:
        - the annotation used the bare name `List` without importing it,
          which raises NameError when the class is defined; the builtin
          generic `list[...]` needs no import;
        - the blocking `requests.post` ran directly inside `async def`,
          stalling the event loop -- it now runs in a worker thread;
        - a timeout guards against a hung connection, and HTTP errors are
          raised explicitly instead of surfacing as a KeyError below.
        """
        import asyncio
        import requests

        headers = {"Authorization": f"Bearer {self.api_key}"}
        payload = {
            "model": self.model,
            "messages": [{"role": m.role, "content": m.content} for m in messages],
        }
        # requests is synchronous: push it off the event loop.
        response = await asyncio.to_thread(
            requests.post,
            f"{self.base_url}/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        response.raise_for_status()
        result = response.json()
        choice = result["choices"][0]
        return CreateResult(
            content=choice["message"]["content"],
            finish_reason=choice.get("finish_reason", "stop"),
            usage=result.get("usage", {}),
        )
# Instantiate the custom client defined above (same endpoint and key as Method 1).
client = CustomModelClient(
    api_key="your-hpc-ai-api-key",
    base_url="https://api.hpc-ai.com/inference/v1",
    model="minimax/minimax-m2.5",
)
1.2 Troubleshooting
| Common Issue | Solution |
|---|---|
| Version compatibility | The `autogen_ext.models.openai` client requires AutoGen 0.4+; AutoGen 0.2 uses the dict-style `config_list` instead |
| model_info errors | Set vision, function_calling, json_output per model capabilities |
| async/await errors | AutoGen 0.4+ client methods are coroutines — call them with `await` inside an async function, or via `asyncio.run(...)` |