config-sample.md +203 −129
```toml
# Codex example configuration (config.toml)
#
# This file lists the main keys Codex reads from config.toml, along with default
# behaviors, recommended examples, and concise explanations. Adjust as needed.
#
# Notes
# Core Model Selection
################################################################################

# Primary model used by Codex. Recommended example for most users: "gpt-5.5".
model = "gpt-5.5"

# Communication style for supported models. Allowed values: none | friendly | pragmatic
# personality = "pragmatic"

# Optional model override for /review. Default: unset (uses current session model).
# review_model = "gpt-5.5"

# Provider id selected from [model_providers]. Default: "openai".
model_provider = "openai"
# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.
# oss_provider = "ollama"

# Preferred service tier. `fast` is honored only when enabled in [features].
# service_tier = "flex" # fast | flex

# Optional manual model metadata. When unset, Codex uses model or preset defaults.
# model_context_window = 128000 # tokens; default: auto for model
# model_auto_compact_token_limit = 64000 # tokens; unset uses model defaults
# tool_output_token_limit = 12000 # tokens stored per tool output
# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override
# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)
# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"
# Reasoning & Verbosity (Responses API capable models)
################################################################################

# Reasoning effort: minimal | low | medium | high | xhigh
# model_reasoning_effort = "medium"

# Optional override used when Codex runs in plan mode: none | minimal | low | medium | high | xhigh
# plan_mode_reasoning_effort = "high"

# Reasoning summary: auto | concise | detailed | none
# model_reasoning_summary = "auto"

# Text verbosity for GPT-5 family (Responses API): low | medium | high
# model_verbosity = "medium"

# Force enable or disable reasoning summaries for current model.
# model_supports_reasoning_summaries = true
################################################################################

# Additional user instructions are injected before AGENTS.md. Default: unset.
# developer_instructions = ""

# Inline override for the history compaction prompt. Default: unset.
# compact_prompt = ""

# Override the default commit co-author trailer. Set to "" to disable it.
# commit_attribution = "Jane Doe <jane@example.com>"

# Override built-in base instructions with a file path. Default: unset.
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"

# Load the compact prompt override from a file. Default: unset.
# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"

################################################################################
# Notifications
################################################################################

# External notifier program (argv array). When unset: disabled.
# notify = ["notify-send", "Codex"]

################################################################################
# Approval & Sandbox
# - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
# - { granular = { ... } }: allow or auto-reject selected prompt categories
approval_policy = "on-request"
# Who reviews eligible approval prompts: user (default) | auto_review
# approvals_reviewer = "user"

# Example granular policy:
# approval_policy = { granular = {
#   sandbox_approval = true,
#   rules = true,
#   mcp_elicitations = true,
#   request_permissions = false,
#   skill_approval = false
# } }

# Allow login-shell semantics for shell-based tools when they request `login = true`.
# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
# - workspace-write
# - danger-full-access (no sandbox; extremely risky)
sandbox_mode = "read-only"
# Named permissions profile to apply by default. Required before using [permissions.<name>].
# default_permissions = "workspace"

# Example filesystem profile. Use `"none"` to deny reads for exact paths or
# glob patterns. On platforms that need pre-expanded glob matches, set
# glob_scan_max_depth when using unbounded patterns such as `**`.
# [permissions.workspace.filesystem]
# glob_scan_max_depth = 3
# ":project_roots" = { "." = "write", "**/*.env" = "none" }
# "/absolute/path/to/secrets" = "none"

################################################################################
# Authentication & Login
# Where to persist CLI login credentials: file (default) | keyring | auto
cli_auth_credentials_store = "file"

# Base URL for ChatGPT auth flow (not OpenAI API).
chatgpt_base_url = "https://chatgpt.com/backend-api/"

# Optional base URL override for the built-in OpenAI provider.
# openai_base_url = "https://us.api.openai.com/v1"

# Restrict ChatGPT login to a specific workspace id. Default: unset.
# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"

# Force login mechanism when Codex would normally auto-select. Default: unset.
# Allowed values: chatgpt | api
# If you use --yolo or another full access sandbox setting, web search defaults to live.
web_search = "cached"

# Active profile name. When unset, no profile is applied.
# profile = "default"

# Suppress the warning shown when under-development feature flags are enabled.
# suppress_unstable_features_warning = true

################################################################################
# Agents (multi-agent roles and limits)
################################################################################

[agents]
# Maximum concurrently open agent threads. Default: 6
# max_threads = 6
# Maximum nested spawn depth. Root session starts at depth 0. Default: 1
# job_max_runtime_seconds = 1800

# [agents.reviewer]
# description = "Find correctness, security, and test risks in code."
# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it
# nickname_candidates = ["Athena", "Ada"]

################################################################################
# Skills (per-skill overrides)
# path = "/path/to/skill/SKILL.md"
# enabled = false

################################################################################
# Sandbox settings (tables)
################################################################################
[shell_environment_policy]
# inherit: all (default) | core | none
inherit = "all"
# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: false
ignore_default_excludes = false
# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
exclude = []
# Explicit key/value overrides (always win). Default: {}
# Experimental: run via user shell profile. Default: false
experimental_use_profile = false

################################################################################
# Managed network proxy settings
################################################################################

# Set `default_permissions = "workspace"` before enabling this profile.
# [permissions.workspace.network]
# enabled = true
# proxy_url = "http://127.0.0.1:43128"
# admin_url = "http://127.0.0.1:43129"
# enable_socks5 = false
# socks_url = "http://127.0.0.1:43130"
# enable_socks5_udp = false
# allow_upstream_proxy = false
# dangerously_allow_non_loopback_proxy = false
# dangerously_allow_non_loopback_admin = false
# dangerously_allow_all_unix_sockets = false
# mode = "limited" # limited | full
# allow_local_binding = false
#
# [permissions.workspace.network.domains]
# "api.openai.com" = "allow"
# "example.com" = "deny"
#
# [permissions.workspace.network.unix_sockets]
# "/var/run/docker.sock" = "allow"

################################################################################
# History (table)
################################################################################
# save-all (default) | none
persistence = "save-all"
# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880
# max_bytes = 5242880

################################################################################
# UI, Notifications, and Misc (tables)
# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"
# notification_method = "auto"

# When notifications fire: unfocused (default) | always
# notification_condition = "unfocused"

# Enables welcome/status/spinner animations. Default: true
animations = true

# Set to [] to hide the footer.
# status_line = ["model", "context-remaining", "git-branch"]

# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
# ["spinner", "project"]. Set to [] to clear the title.
# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
# and task-progress.
# terminal_title = ["spinner", "project"]

# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
# You can also add custom .tmTheme files under $CODEX_HOME/themes.
# theme = "catppuccin-mocha"

# Internal tooltip state keyed by model slug. Usually managed by Codex.
# [tui.model_availability_nux]
# "gpt-5.4" = 1

# Enable or disable analytics for this machine. When unset, Codex uses its default behavior.
[analytics]
enabled = true

# Control whether users can submit feedback from `/feedback`. Default: true
[feedback]
enabled = true
# hide_rate_limit_model_nudge = true
# hide_gpt5_1_migration_prompt = true
# "hide_gpt-5.1-codex-max_migration_prompt" = true
# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }

################################################################################
# Centralized Feature Flags (preferred)
# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
# shell_tool = true
# apps = false
# codex_hooks = false
# unified_exec = true
# shell_snapshot = true
# multi_agent = true
# personality = true
# fast_mode = true
# enable_request_compression = true
# skill_mcp_dependency_install = true
# prevent_idle_sleep = false
# runtime_metrics = false

################################################################################
# Memories (table)
################################################################################

# Enable memories with [features].memories, then tune memory behavior here.
# [memories]
# generate_memories = true
# use_memories = true
# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search

################################################################################
# Define MCP servers under this table. Leave empty to disable.
# command = "docs-server" # required
# args = ["--port", "4000"] # optional
# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is
# env_vars = ["ANOTHER_SECRET"] # optional: forward local parent env vars
# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]
# cwd = "/path/to/server" # optional working directory override
# experimental_environment = "remote" # experimental: run stdio via a remote executor
# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
# enabled_tools = ["search", "summarize"] # optional allow-list
# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)
# scopes = ["read:docs"] # optional OAuth scopes
# oauth_resource = "https://docs.example.com/" # optional OAuth resource

# --- Example: Streamable HTTP transport ---
# [mcp_servers.github]
# startup_timeout_sec = 10.0 # optional
# tool_timeout_sec = 60.0 # optional
# enabled_tools = ["list_issues"] # optional allow-list
# disabled_tools = ["delete_issue"] # optional deny-list
# scopes = ["repo"] # optional OAuth scopes

################################################################################
# Model Providers
################################################################################

# Built-ins include:
# - openai
# - ollama
# - lmstudio
# These IDs are reserved. Use a different ID for custom providers.

[model_providers]

# [model_providers.openaidr]
# name = "OpenAI Data Residency"
# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
# wire_api = "responses" # only supported value
# # requires_openai_auth = true # use only for providers backed by OpenAI auth
# # request_max_retries = 4 # default 4; max 100
# # stream_max_retries = 5 # default 5; max 100
# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
# # supports_websockets = true # optional
# # experimental_bearer_token = "sk-example" # optional dev-only direct bearer token
# # http_headers = { "X-Example" = "value" }
# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }

# --- Example: Azure/OpenAI-compatible provider ---
# [model_providers.azure]
# name = "Azure"
# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
# wire_api = "responses"
# query_params = { api-version = "2025-04-01-preview" }
# env_key = "AZURE_OPENAI_API_KEY"
# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
# # supports_websockets = false

# --- Example: command-backed bearer token auth ---
# [model_providers.proxy]
# name = "OpenAI using LLM proxy"
# base_url = "https://proxy.example.com/v1"
# wire_api = "responses"
#
# [model_providers.proxy.auth]
# command = "/usr/local/bin/fetch-codex-token"
# args = ["--audience", "codex"]
# timeout_ms = 5000
# refresh_interval_ms = 300000

# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
# name = "Ollama"
# base_url = "http://localhost:11434/v1"
# wire_api = "responses"

################################################################################
# Apps / Connectors
# enabled = false
# approval_mode = "approve"

# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
# [tool_suggest]
# discoverables = [
#   { type = "connector", id = "gmail" },
#   { type = "plugin", id = "figma@openai-curated" },
# ]

################################################################################
# Profiles (named presets)
################################################################################

[profiles]

# [profiles.default]
# model = "gpt-5.4"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"
# service_tier = "flex"
# oss_provider = "ollama"
# model_reasoning_effort = "medium"
# plan_mode_reasoning_effort = "high"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# personality = "pragmatic" # or "friendly" or "none"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
# model_catalog_json = "./models.json"
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# experimental_compact_prompt_file = "./compact_prompt.txt"
# tools_view_image = true
# features = { unified_exec = false }

################################################################################
# Projects (trust levels)
################################################################################

[projects]
# Mark specific worktrees as trusted or untrusted.
# [projects."/absolute/path/to/project"]
# trust_level = "trusted" # or "untrusted"

################################################################################
# Tools
################################################################################

[tools]
# view_image = true

################################################################################
# OpenTelemetry (OTEL) - disabled by default
################################################################################
exporter = "none"
# Trace exporter: none (default) | otlp-http | otlp-grpc
trace_exporter = "none"
# Metrics exporter: none | statsig | otlp-http | otlp-grpc
metrics_exporter = "statsig"

# Example OTLP/HTTP exporter configuration
# [otel.exporter."otlp-http"]
# [otel.exporter."otlp-http".headers]
# "x-otlp-api-key" = "${OTLP_TOKEN}"

# [otel.exporter."otlp-http".tls]
# ca-certificate = "certs/otel-ca.pem"
# client-certificate = "/etc/codex/certs/client.pem"
# client-private-key = "/etc/codex/certs/client-key.pem"

# Example OTLP/gRPC trace exporter configuration
# [otel.trace_exporter."otlp-grpc"]
# endpoint = "https://otel.example.com:4317"
# headers = { "x-otlp-meta" = "abc123" }

################################################################################
# Windows
################################################################################

[windows]
# Native Windows sandbox mode (Windows only): unelevated | elevated
sandbox = "unelevated"
```