## GalileoAsyncCallback Objects

```python
class GalileoAsyncCallback(AsyncCallbackHandler)
```

Async LangChain callback handler for logging traces to the Galileo platform.
**Arguments**:

- `_galileo_logger` (`GalileoLogger`): The Galileo logger instance.
- `_nodes` (`dict[UUID, Node]`): A dictionary of nodes, where the key is the run_id and the value is the node.
- `_start_new_trace` (`bool`): Whether to start a new trace when a chain starts. Set this to `False` to continue using the current trace.
- `_flush_on_chain_end` (`bool`): Whether to flush the trace when a chain ends.
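A minimal usage sketch: the handler is passed per invocation through LangChain's `callbacks` config, so every chain, model, tool, and retriever run in that invocation is logged as a node in one Galileo trace. The import path and the `ChatOpenAI` model are assumptions for illustration; check your SDK version for the actual module.

```python
import asyncio

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Assumed import path; the handler may live elsewhere in your SDK version.
from galileo.handlers.langchain import GalileoAsyncCallback


async def main() -> None:
    # With no explicit GalileoLogger, the handler is expected to create one.
    callback = GalileoAsyncCallback()
    chain = ChatPromptTemplate.from_template("Say hi to {name}") | ChatOpenAI()
    # Attaching the handler here routes all callbacks for this run to Galileo.
    result = await chain.ainvoke({"name": "Ada"}, config={"callbacks": [callback]})
    print(result.content)


asyncio.run(main())
```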
#### on_chain_start

```python
async def on_chain_start(serialized: dict[str, Any],
                         inputs: dict[str, Any],
                         *,
                         run_id: UUID,
                         parent_run_id: Optional[UUID] = None,
                         tags: Optional[list[str]] = None,
                         **kwargs: Any) -> Any
```

LangChain callback when a chain starts.
#### on_chain_end

```python
async def on_chain_end(outputs: dict[str, Any],
                       *,
                       run_id: UUID,
                       parent_run_id: Optional[UUID] = None,
                       **kwargs: Any) -> Any
```

LangChain callback when a chain ends.
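The two chain hooks bracket a node's lifetime. The sketch below drives the handler by hand purely to show the call shapes; in normal use LangChain emits these calls itself, and `run_id`/`parent_run_id` are how nested runs find their place in `_nodes`.

```python
from uuid import uuid4


async def drive_manually(callback) -> None:
    run_id = uuid4()
    # Opens a node (and, unless _start_new_trace is False, a fresh trace).
    await callback.on_chain_start({}, {"question": "hi"}, run_id=run_id)
    # Closes the node; if _flush_on_chain_end is set, the trace is flushed.
    await callback.on_chain_end({"answer": "hello"}, run_id=run_id)
```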
#### on_agent_finish

```python
async def on_agent_finish(finish: AgentFinish, *, run_id: UUID,
                          **kwargs: Any) -> Any
```

LangChain callback when an agent finishes.
#### on_llm_start

```python
async def on_llm_start(serialized: dict[str, Any],
                       prompts: list[str],
                       *,
                       run_id: UUID,
                       parent_run_id: Optional[UUID] = None,
                       tags: Optional[list[str]] = None,
                       metadata: Optional[dict[str, Any]] = None,
                       **kwargs: Any) -> Any
```

LangChain callback when an LLM node starts.

**Note**: this callback is only used for non-chat models; chat models are handled by `on_chat_model_start`.
#### on_llm_new_token

```python
async def on_llm_new_token(token: str, *, run_id: UUID, **kwargs: Any) -> Any
```

LangChain callback when an LLM node generates a new token.
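A sketch of when this fires: streaming a model delivers one `on_llm_new_token` call per chunk, letting the handler accumulate the output incrementally. `ChatOpenAI` is an illustrative stand-in for any streaming-capable model.

```python
from langchain_openai import ChatOpenAI  # illustrative model choice


async def stream_with_logging(callback) -> None:
    model = ChatOpenAI(streaming=True)
    # Each streamed chunk triggers on_llm_new_token(token, run_id=...) on the handler.
    async for chunk in model.astream("Name three planets",
                                     config={"callbacks": [callback]}):
        print(chunk.content, end="", flush=True)
```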
#### on_chat_model_start

```python
async def on_chat_model_start(serialized: dict[str, Any],
                              messages: list[list[BaseMessage]],
                              *,
                              run_id: UUID,
                              parent_run_id: Optional[UUID] = None,
                              tags: Optional[list[str]] = None,
                              metadata: Optional[dict[str, Any]] = None,
                              **kwargs: Any) -> Any
```

LangChain callback when a chat model starts.
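To make the non-chat note above concrete, here is a sketch contrasting the two entry points: a completion-style LLM routes through `on_llm_start` with raw prompt strings, while a chat model routes through `on_chat_model_start` with message lists. The OpenAI model classes are stand-ins for illustration.

```python
from langchain_openai import OpenAI, ChatOpenAI  # illustrative models


async def run_both(callback) -> None:
    config = {"callbacks": [callback]}
    # Completion-style model: handler sees on_llm_start(serialized, prompts=[...]).
    await OpenAI().ainvoke("2 + 2 =", config=config)
    # Chat model: handler sees on_chat_model_start(serialized, messages=[[...]]).
    await ChatOpenAI().ainvoke("2 + 2 =", config=config)
```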
#### on_llm_end

```python
async def on_llm_end(response: LLMResult,
                     *,
                     run_id: UUID,
                     parent_run_id: Optional[UUID] = None,
                     **kwargs: Any) -> Any
```

LangChain callback when an LLM node ends.
#### on_tool_start

```python
async def on_tool_start(serialized: dict[str, Any],
                        input_str: str,
                        *,
                        run_id: UUID,
                        parent_run_id: Optional[UUID] = None,
                        tags: Optional[list[str]] = None,
                        metadata: Optional[dict[str, Any]] = None,
                        **kwargs: Any) -> Any
```

LangChain callback when a tool node starts.
#### on_tool_end

```python
async def on_tool_end(output: Any,
                      *,
                      run_id: UUID,
                      parent_run_id: Optional[UUID] = None,
                      **kwargs: Any) -> Any
```

LangChain callback when a tool node ends.
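A sketch of the tool hooks in action: invoking a tool with the handler attached fires `on_tool_start` with the input string and `on_tool_end` with the return value. The `word_count` tool is hypothetical.

```python
from langchain_core.tools import tool


@tool
def word_count(text: str) -> int:
    """Count the words in a piece of text."""
    return len(text.split())


async def run_tool(callback) -> None:
    # on_tool_start receives input_str="one two three"; on_tool_end receives 3.
    count = await word_count.ainvoke("one two three",
                                     config={"callbacks": [callback]})
    print(count)
```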
#### on_retriever_start

```python
async def on_retriever_start(serialized: dict[str, Any],
                             query: str,
                             *,
                             run_id: UUID,
                             parent_run_id: Optional[UUID] = None,
                             tags: Optional[list[str]] = None,
                             metadata: Optional[dict[str, Any]] = None,
                             **kwargs: Any) -> Any
```

LangChain callback when a retriever node starts.
#### on_retriever_end

```python
async def on_retriever_end(documents: list[Document],
                           *,
                           run_id: UUID,
                           parent_run_id: Optional[UUID] = None,
                           **kwargs: Any) -> Any
```

LangChain callback when a retriever node ends.
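A sketch of the retriever hooks: a retriever run fires `on_retriever_start` with the query and `on_retriever_end` with the returned `Document` list. `StaticRetriever` is a toy stand-in for a real vector store retriever.

```python
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class StaticRetriever(BaseRetriever):
    """Toy retriever that always returns the same document."""

    def _get_relevant_documents(self, query: str, *, run_manager):
        return [Document(page_content="Galileo logs retriever spans.")]


async def run_retriever(callback) -> None:
    # on_retriever_start fires with the query; on_retriever_end with the documents.
    docs = await StaticRetriever().ainvoke("what gets logged?",
                                           config={"callbacks": [callback]})
    print(docs[0].page_content)
```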
#### on_chain_error

```python
async def on_chain_error(error: Exception,
                         *,
                         run_id: UUID,
                         parent_run_id: Optional[UUID] = None,
                         **kwargs: Any) -> Any
```

Called when a chain errors.
#### on_llm_error

```python
async def on_llm_error(error: Exception,
                       *,
                       run_id: UUID,
                       parent_run_id: Optional[UUID] = None,
                       **kwargs: Any) -> Any
```

Called when an LLM errors.
#### on_tool_error

```python
async def on_tool_error(error: Exception,
                        *,
                        run_id: UUID,
                        parent_run_id: Optional[UUID] = None,
                        **kwargs: Any) -> Any
```

Called when a tool errors.
#### on_retriever_error

```python
async def on_retriever_error(error: Exception,
                             *,
                             run_id: UUID,
                             parent_run_id: Optional[UUID] = None,
                             **kwargs: Any) -> Any
```

Called when a retriever errors.
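A sketch of the error path: when a wrapped run raises, LangChain delivers the exception to the matching error callback (here `on_tool_error`) before re-raising, so the failure can be recorded on the node rather than leaving the trace dangling. The `flaky` tool is hypothetical.

```python
from langchain_core.tools import tool


@tool
def flaky(query: str) -> str:
    """A tool that always fails, for illustration."""
    raise ValueError("upstream service unavailable")


async def run_flaky(callback) -> None:
    try:
        # The ValueError is routed to on_tool_error(error, run_id=...) first,
        # then propagates to the caller.
        await flaky.ainvoke("ping", config={"callbacks": [callback]})
    except ValueError as err:
        print(f"tool failed: {err}")
```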