litellm --model gpt-5-mini --host 127.0.0.1 --port 4000
"""Stream a response from an Agent backed by a LiteLLM proxy (OpenAI-compatible).

Requires the LiteLLM proxy to be running (see the `litellm --model ...` command
above) and LITELLM_API_KEY to be set in the environment.
"""
from agno.agent import Agent, RunOutput  # noqa
from agno.models.litellm import LiteLLMOpenAI

# LiteLLMOpenAI talks to the local proxy using the OpenAI wire format;
# `id` is the model name the proxy was started with.
agent = Agent(model=LiteLLMOpenAI(id="gpt-5-mini"), markdown=True)

# stream=True prints tokens incrementally as they arrive.
agent.print_response("Share a 2 sentence horror story", stream=True)
Set up your virtual environment
uv venv --python 3.12
source .venv/bin/activate
Set your API key
export LITELLM_API_KEY=xxx
Install dependencies
uv pip install -U litellm[proxy] openai agno
Start the proxy server
Run Agent
python cookbook/11_models/litellm/basic_stream.py
Was this page helpful?