Everything you need to hit the three core endpoints in one place: text generation, image generation, and audio transcription.
Text generation
import openai

client = openai.OpenAI(
    base_url="https://api.llm7.io/v1",
    api_key="none",  # or your token
)

resp = client.chat.completions.create(
    model="default",  # or "fast" / "pro"
    messages=[
        {"role": "system", "content": "Answer concisely."},
        {"role": "user", "content": "Give me three onboarding tips for new engineers."},
    ],
    temperature=0.4,
)

print(resp.choices[0].message.content)
Use model="fast" for the lowest latency and model="pro" (paid) for the highest-quality reasoning.
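Because the endpoint is OpenAI-compatible, standard SDK features such as streaming should work the same way. Here is a minimal sketch, assuming the server honors stream=True; the prompt is just an illustration:

import openai

client = openai.OpenAI(base_url="https://api.llm7.io/v1", api_key="none")

# Stream the reply token-by-token (assumes the endpoint supports stream=True).
stream = client.chat.completions.create(
    model="fast",
    messages=[{"role": "user", "content": "Summarize HTTP/2 in one sentence."}],
    stream=True,
)
for chunk in stream:
    # Some chunks may carry no content (e.g., the final one), so guard before printing.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()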
Image generation
from openai import OpenAI

client = OpenAI(base_url="https://api.llm7.io/v1", api_key="none")

prompt = (
    "A futuristic cityscape at sunset, flying cars, neon reflections, "
    "cinematic cyberpunk, highly detailed"
)

res = client.images.generate(
    model="flux",  # or "turbo" (aliases: 1, 2, image-model-1, image-model-2)
    prompt=prompt,
    size="1024x1024",  # or set w / h (100–1500)
    extra_body={"seed": 42},  # fixed seed for reproducible output
)

print(res.data[0].url)
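The response carries a hosted URL rather than raw bytes. If you want the file locally, a small helper like the one below downloads it with Python's standard library; the save_image name and the output path are illustrative, not part of the API:

import urllib.request

def save_image(url: str, path: str = "cityscape.png") -> None:
    # Fetch the generated image from the returned URL and write it to disk.
    with urllib.request.urlopen(url) as response, open(path, "wb") as out:
        out.write(response.read())

save_image(res.data[0].url)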
Audio transcription
import openai
import pathlib

client = openai.OpenAI(
    base_url="https://api.llm7.io/v1",
    api_key="none",  # or your token
)

def transcribe(audio_path: str):
    with pathlib.Path(audio_path).open("rb") as fp:
        return client.audio.transcriptions.create(
            model="gpt-4o-mini-audio-preview",
            file=fp,
            language="en",
            response_format="json",
            temperature=0,
        )

print(transcribe("1.mp3"))
Expected response structure:
Transcription(text="Transcription...", logprobs=None)
Audio transcription supports .mp3 and .wav files. Set the language parameter to improve accuracy for non-English audio.
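For example, for Spanish audio you would pass the ISO-639-1 code; this sketch reuses the client from the transcription example above, and the filename is a placeholder:

with pathlib.Path("entrevista.mp3").open("rb") as fp:
    result = client.audio.transcriptions.create(
        model="gpt-4o-mini-audio-preview",
        file=fp,
        language="es",  # ISO-639-1 code for Spanish
        response_format="json",
    )
print(result.text)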