Core Logging
Galileo Logger
Overview
Getting Started
SDK/API
- Python SDK Reference
- TypeScript SDK Reference
How-to Guides
- Overview
- Conversational AI
- Retrieval-Augmented Generation
- Agentic AI
Cookbooks
- Features
Concepts
Core Logging
Galileo Logger
The `GalileoLogger` is a low-level API for logging traces and spans to Galileo. It provides more control over the logging process than the higher-level wrappers.
Usage
import { GalileoLogger } from "galileo";

// The project and log stream can also be supplied through the
// GALILEO_PROJECT and GALILEO_LOG_STREAM environment variables
// instead of constructor options.
const logger = new GalileoLogger({
  projectName: "my-project",
  logStreamName: "my-log-stream",
});
Examples
Basic Example
import { GalileoLogger } from "galileo";

/**
 * Demonstrates low-level logging: a trace containing a workflow span,
 * which in turn contains an LLM span, all flushed to Galileo at the end.
 *
 * @returns the traces returned by `logger.flush()`.
 */
async function runLoggerExample() {
  // Project/log stream may also come from the GALILEO_PROJECT and
  // GALILEO_LOG_STREAM environment variables.
  const logger = new GalileoLogger({
    projectName: "my-project",
    logStreamName: "my-log-stream",
  });

  console.log("Creating trace with spans...");

  // Create a new trace. createdAt is in nanoseconds:
  // Date.now() is milliseconds, and 1 ms = 1,000,000 ns.
  logger.startTrace({
    input: "Example trace input",
    output: undefined, // set later via conclude()
    name: "Example Trace",
    createdAt: Date.now() * 1000000, // createdAt in nanoseconds
    durationNs: undefined,
    metadata: { source: "test-script" },
    tags: ["test", "example"],
  });

  // Add a workflow span (parent span).
  logger.addWorkflowSpan({
    input: "Processing workflow",
    output: undefined, // set later via conclude()
    name: "Main Workflow",
    durationNs: undefined,
    createdAt: Date.now() * 1000000, // createdAt in nanoseconds
    metadata: { workflow_type: "test" },
    tags: ["workflow"],
  });

  // Add an LLM span as a child of the workflow span.
  logger.addLlmSpan({
    input: [{ role: "user", content: "Hello, how are you?" }], // input messages
    output: {
      role: "assistant",
      content: "I am doing well, thank you for asking!",
    }, // output message
    model: "gpt-3.5-turbo",
    name: "Chat Completion",
    durationNs: 1000000000, // 1 s
    metadata: { temperature: "0.7" },
    tags: ["llm", "chat"],
  });

  // Conclude the workflow span (conclude() closes the most recently
  // opened span first).
  logger.conclude({
    output: "Workflow completed successfully",
    durationNs: 2000000000, // 2 seconds
  });

  // Conclude the trace.
  logger.conclude({
    output: "Final trace output with all spans completed",
    durationNs: 3000000000, // 3 seconds
  });

  // Flush the traces to Galileo.
  return logger.flush();
}

// Run the example. Attach a rejection handler so a failure is reported
// instead of becoming an unhandled promise rejection.
runLoggerExample().catch(console.error);
Complete Example with OpenAI
import { GalileoLogger } from "galileo";
import { OpenAI } from "openai";

/**
 * Calls the OpenAI chat completions API once with a single user message.
 *
 * @param input - the user prompt.
 * @param model - OpenAI model name, e.g. "gpt-4o".
 * @param temperature - sampling temperature.
 * @returns the assistant's reply text ("" if the API returned no content —
 *   `message.content` is `string | null` in the OpenAI typings).
 */
async function callLLM(
  input: string,
  model: string,
  temperature: number
): Promise<string> {
  const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const response = await openai.chat.completions.create({
    model,
    messages: [{ role: "user", content: input }],
    temperature,
  });
  return response.choices[0].message.content ?? "";
}

/**
 * Logs a single LLM call as a trace with one LLM span, concluding the
 * trace on both the success and the error path before flushing.
 */
async function main() {
  const logger = new GalileoLogger({
    projectName: "my-project",
    logStreamName: "my-log-stream",
  });

  const input = "Why is the sky blue?";
  const model = "gpt-4o";
  const temperature = 0.7;

  // Start a trace. createdAt is in nanoseconds (Date.now() is ms).
  logger.startTrace({
    input,
    output: undefined, // set later via conclude()
    name: "Sky Question",
    createdAt: Date.now() * 1000000, // createdAt in nanoseconds
    durationNs: undefined,
    metadata: { source: "test-script" },
    tags: ["sky", "science"],
  });

  try {
    // Call the LLM, timing the call.
    const startTime = Date.now();
    const output = await callLLM(input, model, temperature);
    const endTime = Date.now();
    const durationNs = (endTime - startTime) * 1000000; // convert to nanoseconds

    // Add an LLM span.
    logger.addLlmSpan({
      input: [{ role: "user", content: input }],
      output: { role: "assistant", content: output },
      model: model,
      name: "Sky Question LLM Call",
      durationNs: durationNs,
      metadata: { temperature: temperature.toString() },
      tags: ["llm", "sky"],
    });

    // Conclude the trace.
    logger.conclude({
      output: output,
      durationNs: durationNs,
    });

    console.log(output);
  } catch (error) {
    // Log the error and conclude the trace anyway, so the flushed trace
    // is not left open when the LLM call fails.
    console.error("Error:", error);
    logger.conclude({ output: `Error: ${String(error)}` });
  } finally {
    // Flush the trace to Galileo.
    await logger.flush();
  }
}

// Run the example. Handle rejection explicitly so a failure in main()
// does not become an unhandled promise rejection.
main().catch(console.error);
API Reference
class GalileoLogger {
  constructor(options?: { projectName?: string; logStreamName?: string });

  // Starts a new trace. Takes a single options object, matching how the
  // examples above invoke it (the positional form shown previously did
  // not match actual usage).
  startTrace(options: {
    input: any;
    output?: any;
    name?: string;
    createdAt?: number; // nanoseconds since epoch
    durationNs?: number;
    metadata?: Record<string, any>;
    tags?: string[];
  }): GalileoLogger;

  // Adds a workflow (parent) span to the current trace.
  addWorkflowSpan(options: {
    input: any;
    output?: any;
    name?: string;
    durationNs?: number;
    createdAt?: number; // nanoseconds since epoch
    metadata?: Record<string, any>;
    tags?: string[];
  }): GalileoLogger;

  // Adds an LLM span as a child of the most recently opened span.
  addLlmSpan(options: {
    input: any;
    output?: any;
    model?: string;
    name?: string;
    durationNs?: number;
    metadata?: Record<string, any>;
    tags?: string[];
  }): GalileoLogger;

  // Concludes the most recently opened span (or the trace itself once
  // all spans are concluded), setting its output and duration.
  conclude(options: { output?: any; durationNs?: number }): GalileoLogger;

  // Sends all buffered traces to Galileo.
  flush(): Promise<any>;
}