Code
cookbook/11_models/openai/responses/async_basic_stream.py
import asyncio
from typing import Iterator  # noqa

from agno.agent import Agent, RunOutputEvent  # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-5-mini"), markdown=True)

# Get the response in a variable
# run_response: Iterator[RunOutputEvent] = agent.run("Share a 2 sentence horror story", stream=True)
# for chunk in run_response:
#     print(chunk.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story", stream=True))
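The commented-out lines capture the stream with the synchronous agent.run. If you prefer to consume the events inside the event loop, the sketch below iterates them with async for. It assumes that agent.arun(..., stream=True) yields RunOutputEvent objects asynchronously (check this against your installed agno version); the helper name stream_response is just for illustration.

import asyncio
from typing import AsyncIterator  # noqa

from agno.agent import Agent, RunOutputEvent  # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-5-mini"), markdown=True)


async def stream_response() -> None:
    # Assumption: arun(..., stream=True) returns an async iterator of RunOutputEvent
    run_response: AsyncIterator[RunOutputEvent] = agent.arun(
        "Share a 2 sentence horror story", stream=True
    )
    async for chunk in run_response:
        # Each event carries the incremental content of the streamed response
        print(chunk.content)


asyncio.run(stream_response())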