GalileoCallback Objects

class GalileoCallback(BaseCallbackHandler)

LangChain callback handler for logging traces to the Galileo platform.

Attributes:

  • _galileo_logger (GalileoLogger): The Galileo logger instance.
  • _nodes (dict[UUID, Node]): A dictionary of nodes, where the key is the run_id and the value is the node.
  • _start_new_trace (bool): Whether to start a new trace when a chain starts. Set this to False to continue using the current trace.
  • _flush_on_chain_end (bool): Whether to flush the trace when a chain ends.

on_chain_start

def on_chain_start(serialized: dict[str, Any],
                   inputs: dict[str, Any],
                   *,
                   run_id: UUID,
                   parent_run_id: Optional[UUID] = None,
                   tags: Optional[list[str]] = None,
                   **kwargs: Any) -> Any

LangChain callback invoked when a chain starts.

on_chain_end

def on_chain_end(outputs: dict[str, Any],
                 *,
                 run_id: UUID,
                 parent_run_id: Optional[UUID] = None,
                 **kwargs: Any) -> Any

LangChain callback invoked when a chain ends.

on_agent_finish

def on_agent_finish(finish: AgentFinish, *, run_id: UUID,
                    **kwargs: Any) -> Any

LangChain callback invoked when an agent finishes.

on_llm_start

def on_llm_start(serialized: dict[str, Any],
                 prompts: list[str],
                 *,
                 run_id: UUID,
                 parent_run_id: Optional[UUID] = None,
                 tags: Optional[list[str]] = None,
                 metadata: Optional[dict[str, Any]] = None,
                 **kwargs: Any) -> Any

LangChain callback invoked when an LLM node starts.

Note: This callback is only used for non-chat models.

on_llm_new_token

def on_llm_new_token(token: str, *, run_id: UUID, **kwargs: Any) -> Any

LangChain callback invoked when an LLM node generates a new token.

on_chat_model_start

def on_chat_model_start(serialized: dict[str, Any],
                        messages: list[list[BaseMessage]],
                        *,
                        run_id: UUID,
                        parent_run_id: Optional[UUID] = None,
                        tags: Optional[list[str]] = None,
                        metadata: Optional[dict[str, Any]] = None,
                        **kwargs: Any) -> Any

LangChain callback invoked when a chat model starts.

on_llm_end

def on_llm_end(response: LLMResult,
               *,
               run_id: UUID,
               parent_run_id: Optional[UUID] = None,
               **kwargs: Any) -> Any

LangChain callback invoked when an LLM node ends.

on_tool_start

def on_tool_start(serialized: dict[str, Any],
                  input_str: str,
                  *,
                  run_id: UUID,
                  parent_run_id: Optional[UUID] = None,
                  tags: Optional[list[str]] = None,
                  metadata: Optional[dict[str, Any]] = None,
                  **kwargs: Any) -> Any

LangChain callback invoked when a tool node starts.

on_tool_end

def on_tool_end(output: Any,
                *,
                run_id: UUID,
                parent_run_id: Optional[UUID] = None,
                **kwargs: Any) -> Any

LangChain callback invoked when a tool node ends.

on_retriever_start

def on_retriever_start(serialized: dict[str, Any],
                       query: str,
                       *,
                       run_id: UUID,
                       parent_run_id: Optional[UUID] = None,
                       tags: Optional[list[str]] = None,
                       metadata: Optional[dict[str, Any]] = None,
                       **kwargs: Any) -> Any

LangChain callback invoked when a retriever node starts.

on_retriever_end

def on_retriever_end(documents: list[Document],
                     *,
                     run_id: UUID,
                     parent_run_id: Optional[UUID] = None,
                     **kwargs: Any) -> Any

LangChain callback invoked when a retriever node ends.