ℹ️ These docs are for the free version of Galileo. Documentation for current customers can be found here.
PromptRunSettingsInput
optional deployment_name: null | string;
PromptRunSettingsInput.deployment_name;
optional echo: boolean;
false;
PromptRunSettingsInput.echo;
optional frequency_penalty: number;
0;
PromptRunSettingsInput.frequency_penalty;
optional known_models: object[];
alias: string;
optional alternative_names: string[];
optional api_version: null | string;
optional assistant_role: null | string;
optional cost_by: "tokens" | "characters";
"tokens";
optional formatting_tokens: number;
optional input_map: | null | { prefix?: string; prompt: string; suffix?: string; };
null
{ prefix?: string; prompt: string; suffix?: string; }
optional input_price: number;
optional input_token_limit: null | number;
optional integration: | "anthropic" | "aws_bedrock" | "aws_sagemaker" | "azure" | "databricks" | "mistral" | "nvidia" | "openai" | "vegas_gateway" | "vertex_ai" | "writer";
"openai";
optional is_chat: boolean;
name: string;
optional output_map: | null | { completion_reason?: null | string; input_token_count?: null | string; output_token_count?: null | string; response: string; token_count?: null | string; };
{ completion_reason?: null | string; input_token_count?: null | string; output_token_count?: null | string; response: string; token_count?: null | string; }
optional output_price: number;
optional output_token_limit: null | number;
optional params_map: object;
optional echo: null | string;
optional frequency_penalty: null | string;
optional logprobs: null | string;
optional max_tokens: null | string;
optional model: null | string;
optional n: null | string;
optional presence_penalty: null | string;
optional response_format: null | string;
optional stop_sequences: null | string;
optional temperature: null | string;
optional tool_choice: null | string;
optional tools: null | string;
optional top_k: null | string;
optional top_logprobs: null | string;
optional top_p: null | string;
optional provides_log_probs: boolean;
optional response_prefix_tokens: number;
optional system_supported: boolean;
optional token_limit: null | number;
optional user_role: null | string;
PromptRunSettingsInput.known_models;
optional logprobs: boolean;
true;
PromptRunSettingsInput.logprobs;
optional max_tokens: number;
1024;
PromptRunSettingsInput.max_tokens;
optional model_alias: string;
"gpt-4.1-mini";
PromptRunSettingsInput.model_alias;
optional n: number;
1;
PromptRunSettingsInput.n;
optional presence_penalty: number;
PromptRunSettingsInput.presence_penalty;
optional response_format: | null | { [key: string]: string; };
PromptRunSettingsInput.response_format;
optional stop_sequences: null | string[];
PromptRunSettingsInput.stop_sequences;
optional temperature: number;
PromptRunSettingsInput.temperature;
optional tool_choice: | null | string | { function: { name: string; }; type?: string; };
string
{ function: { name: string; }; type?: string; }
function: object;
optional type: string;
function
PromptRunSettingsInput.tool_choice;
optional tools: null | object[];
PromptRunSettingsInput.tools;
optional top_k: number;
40;
PromptRunSettingsInput.top_k;
optional top_logprobs: number;
5;
PromptRunSettingsInput.top_logprobs;
optional top_p: number;
PromptRunSettingsInput.top_p;
Was this page helpful?