Skip to main content

LangChain

1.1 Usage

Method 1: Pass Parameters Directly (Python)
from langchain_openai import ChatOpenAI

# Point the OpenAI-compatible client at the HPC-AI inference endpoint.
API_BASE = "https://api.hpc-ai.com/inference/v1"
API_KEY = "your-hpc-ai-api-key"
MODEL = "minimax/minimax-m2.5"

llm = ChatOpenAI(
    openai_api_base=API_BASE,
    openai_api_key=API_KEY,
    model=MODEL,
)

# Send a single prompt and print the model's text reply.
response = llm.invoke("Hello!")
print(response.content)

Method 2: Environment Variables

import os

# Put the endpoint and key in the environment before constructing the
# client; ChatOpenAI picks up OPENAI_API_BASE / OPENAI_API_KEY itself,
# so no credentials need to appear in the constructor call.
os.environ.update({
    "OPENAI_API_BASE": "https://api.hpc-ai.com/inference/v1",
    "OPENAI_API_KEY": "your-hpc-ai-api-key",
})

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="minimax/minimax-m2.5")

Method 3: JavaScript/TypeScript

import { ChatOpenAI } from "@langchain/openai";

// OpenAI-compatible client configuration for the HPC-AI inference endpoint.
const config = {
  baseURL: "https://api.hpc-ai.com/inference/v1",
  apiKey: "your-hpc-ai-api-key",
  model: "minimax/minimax-m2.5",
};

const llm = new ChatOpenAI(config);

Method 4: Use in an Agent (Python)

from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain import hub

# OpenAI-compatible chat model served by the HPC-AI endpoint.
llm = ChatOpenAI(
    openai_api_base="https://api.hpc-ai.com/inference/v1",
    openai_api_key="your-hpc-ai-api-key",
    model="minimax/minimax-m2.5",
)

# FIX: `tools` was used below but never defined, so the original snippet
# raises NameError when run verbatim. Define your tool list here.
tools = []  # e.g. [my_search_tool, my_calculator_tool]

# Pull a ready-made OpenAI-functions agent prompt from LangChain Hub.
prompt = hub.pull("hwchase17/openai-functions-agent")

# Bind the model, tools, and prompt into an executable agent.
agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)

result = agent_executor.invoke({"input": "Your question"})

1.2 References