Text generation example
# Minimal chat-completion example against the llm7.io OpenAI-compatible API.
import openai

# Point the official OpenAI client at the llm7.io endpoint instead of api.openai.com.
llm7 = openai.OpenAI(
    base_url="https://api.llm7.io/v1",
    api_key="unused", # Required. Get it for free at https://token.llm7.io/ for higher rate limits.
)

# Build the conversation first, then request a single completion.
conversation = [
    {"role": "system", "content": "Answer concisely."},
    {"role": "user", "content": "Give me three onboarding tips for new engineers."},
]
response = llm7.chat.completions.create(
    model="default", # or "fast" / "pro"
    messages=conversation,
    temperature=0.4,
)

# The assistant's reply is the first (and only) choice.
print(response.choices[0].message.content)
Use model="fast" for lowest latency, and model="pro" (paid) for the highest-quality reasoning.