# Sample Configuration

Use this example configuration as a starting point. It includes most keys Codex reads from `config.toml`, along with default behaviors, recommended values where helpful, and short notes.

For explanations and guidance, see:

- [Config basics](https://developers.openai.com/codex/config-basic)
- [Advanced Config](https://developers.openai.com/codex/config-advanced)
- [Config Reference](https://developers.openai.com/codex/config-reference)
- [Sandbox and approvals](https://developers.openai.com/codex/agent-approvals-security#sandbox-and-approvals)
- [Managed configuration](https://developers.openai.com/codex/enterprise/managed-configuration)

Use the snippet below as a reference. Copy only the keys and sections you need into `~/.codex/config.toml` (or into a project-scoped `.codex/config.toml`), then adjust values for your setup.

```toml
# Codex example configuration (config.toml)
#
# This file lists the main keys Codex reads from config.toml, along with default
# behaviors, recommended examples, and concise explanations. Adjust as needed.
#
# Notes
# - Root keys must appear before tables in TOML.
# Core Model Selection
################################################################################

# Primary model used by Codex. Recommended example for most users: "gpt-5.5".
model = "gpt-5.5"

# Communication style for supported models. Allowed values: none | friendly | pragmatic
# personality = "pragmatic"

# Optional model override for /review. Default: unset (uses current session model).
# review_model = "gpt-5.5"

# Provider id selected from [model_providers]. Default: "openai".
model_provider = "openai"
# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.
# oss_provider = "ollama"

# Preferred service tier. `fast` is honored only when enabled in [features].
# service_tier = "flex" # fast | flex

# Optional manual model metadata. When unset, Codex uses model or preset defaults.
# model_context_window = 128000 # tokens; default: auto for model
# model_auto_compact_token_limit = 64000 # tokens; unset uses model defaults
# tool_output_token_limit = 12000 # tokens stored per tool output
# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override
# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)
# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"
# sqlite_home = "/absolute/path/to/codex-state" # optional SQLite-backed runtime state directory

################################################################################
# Reasoning & Verbosity (Responses API capable models)
################################################################################

# Reasoning effort: minimal | low | medium | high | xhigh
# model_reasoning_effort = "medium"

# Optional override used when Codex runs in plan mode: none | minimal | low | medium | high | xhigh
# plan_mode_reasoning_effort = "high"

# Reasoning summary: auto | concise | detailed | none
# model_reasoning_summary = "auto"

# Text verbosity for GPT-5 family (Responses API): low | medium | high
# model_verbosity = "medium"

# Force enable or disable reasoning summaries for current model.
# model_supports_reasoning_summaries = true

################################################################################
# Additional user instructions are injected before AGENTS.md. Default: unset.
# developer_instructions = ""

# Inline override for the history compaction prompt. Default: unset.
# compact_prompt = ""

# Override the default commit co-author trailer. This only takes effect when
# [features].codex_git_commit is enabled. When enabled and unset, Codex uses
# "Codex <noreply@openai.com>". Set to "" to disable it.
# commit_attribution = "Jane Doe <jane@example.com>"

# Override built-in base instructions with a file path. Default: unset.
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"

# Load the compact prompt override from a file. Default: unset.
# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"

################################################################################
# Notifications
################################################################################

# External notifier program (argv array). When unset: disabled.
# notify = ["notify-send", "Codex"]

################################################################################
# Approval & Sandbox
# - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
# - { granular = { ... } }: allow or auto-reject selected prompt categories
approval_policy = "on-request"
# Who reviews eligible approval prompts: user (default) | auto_review
# approvals_reviewer = "user"

# Example granular policy:
# approval_policy = { granular = {
#   sandbox_approval = true,
#   rules = true,
#   mcp_elicitations = true,
#   request_permissions = false,
#   skill_approval = false
# } }

# Allow login-shell semantics for shell-based tools when they request `login = true`.
# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
allow_login_shell = true

# Filesystem/network sandbox policy for tool calls:
# - read-only (default)
# - workspace-write
# - danger-full-access (no sandbox; extremely risky)
sandbox_mode = "read-only"
# Named permissions profile to apply by default. Built-ins:
# :read-only | :workspace | :danger-no-sandbox
# Use a custom name such as "workspace" only when you also define [permissions.workspace].
# default_permissions = ":workspace"

# Example filesystem profile. Use `"none"` to deny reads for exact paths or
# glob patterns. On platforms that need pre-expanded glob matches, set
# glob_scan_max_depth when using unbounded patterns such as `**`.
# [permissions.workspace.filesystem]
# glob_scan_max_depth = 3
# ":project_roots" = { "." = "write", "**/*.env" = "none" }
# "/absolute/path/to/secrets" = "none"

################################################################################
# Authentication & Login
# Where to persist CLI login credentials: file (default) | keyring | auto
cli_auth_credentials_store = "file"

# Base URL for ChatGPT auth flow (not OpenAI API).
chatgpt_base_url = "https://chatgpt.com/backend-api/"

# Optional base URL override for the built-in OpenAI provider.
# openai_base_url = "https://us.api.openai.com/v1"

# Restrict ChatGPT login to a specific workspace id. Default: unset.
# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"

# Force login mechanism when Codex would normally auto-select. Default: unset.
# Allowed values: chatgpt | api

# Preferred store for MCP OAuth credentials: auto (default) | file | keyring
mcp_oauth_credentials_store = "auto"
# Optional fixed port for MCP OAuth callback: 1-65535. Default: unset.
# mcp_oauth_callback_port = 4321
# Optional redirect URI override for MCP OAuth login (for example, remote devbox ingress).
# Custom callback paths are supported. `mcp_oauth_callback_port` still controls the listener port.
# mcp_oauth_callback_url = "https://devbox.example.internal/callback"

################################################################################
# Project Documentation Controls
# If you use --yolo or another full access sandbox setting, web search defaults to live.
web_search = "cached"

# Active profile name. When unset, no profile is applied.
# profile = "default"

# Suppress the warning shown when under-development feature flags are enabled.
# suppress_unstable_features_warning = true

################################################################################
# Agents (multi-agent roles and limits)
################################################################################

[agents]
# Maximum concurrently open agent threads. Default: 6
# max_threads = 6
# Maximum nested spawn depth. Root session starts at depth 0. Default: 1
# max_depth = 1
# Default timeout per worker for spawn_agents_on_csv jobs. When unset, the tool defaults to 1800 seconds.
# job_max_runtime_seconds = 1800

# [agents.reviewer]
# description = "Find correctness, security, and test risks in code."
# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it
# nickname_candidates = ["Athena", "Ada"]

################################################################################
# Skills (per-skill overrides)
# Disable or re-enable a specific skill without deleting it.
[[skills.config]]
# path = "/path/to/skill/SKILL.md"
# enabled = false

################################################################################
# Sandbox settings (tables)
################################################################################
[shell_environment_policy]
# inherit: all (default) | core | none
inherit = "all"
# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: false
ignore_default_excludes = false
# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
exclude = []
# Explicit key/value overrides (always win). Default: {}
# Experimental: run via user shell profile. Default: false
experimental_use_profile = false

################################################################################
# Managed network proxy settings
################################################################################

# Set `default_permissions = "workspace"` before enabling this profile.
# [permissions.workspace.network]
# enabled = true
# proxy_url = "http://127.0.0.1:43128"
# admin_url = "http://127.0.0.1:43129"
# enable_socks5 = false
# socks_url = "http://127.0.0.1:43130"
# enable_socks5_udp = false
# allow_upstream_proxy = false
# dangerously_allow_non_loopback_proxy = false
# dangerously_allow_non_loopback_admin = false
# dangerously_allow_all_unix_sockets = false
# mode = "limited" # limited | full
# allow_local_binding = false
#
# [permissions.workspace.network.domains]
# "api.openai.com" = "allow"
# "example.com" = "deny"
#
# [permissions.workspace.network.unix_sockets]
# "/var/run/docker.sock" = "allow"

################################################################################
# History (table)
################################################################################
# save-all (default) | none
persistence = "save-all"
# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880
# max_bytes = 5242880

################################################################################
# UI, Notifications, and Misc (tables)
# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"
# notification_method = "auto"

# When notifications fire: unfocused (default) | always
# notification_condition = "unfocused"

# Enables welcome/status/spinner animations. Default: true
animations = true

# Control alternate screen usage (auto skips it in Zellij to preserve scrollback).
# alternate_screen = "auto"

# Ordered list of footer status-line item IDs. When unset, Codex uses:
# ["model-with-reasoning", "context-remaining", "current-dir"].
# Set to [] to hide the footer.
# status_line = ["model", "context-remaining", "git-branch"]

# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
# ["spinner", "project"]. Set to [] to clear the title.
# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
# and task-progress.
# terminal_title = ["spinner", "project"]

# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
# You can also add custom .tmTheme files under $CODEX_HOME/themes.
# theme = "catppuccin-mocha"

# Custom key bindings. Context-specific bindings override [tui.keymap.global].
# Use [] to unbind an action.
# [tui.keymap.global]
# open_transcript = "ctrl-t"
# open_external_editor = []
#
# [tui.keymap.composer]
# submit = ["enter", "ctrl-m"]

# Internal tooltip state keyed by model slug. Usually managed by Codex.
# [tui.model_availability_nux]
# "gpt-5.4" = 1

# Enable or disable analytics for this machine. When unset, Codex uses its default behavior.
[analytics]
enabled = true

# Control whether users can submit feedback from `/feedback`. Default: true
[feedback]
enabled = true
# hide_rate_limit_model_nudge = true
# hide_gpt5_1_migration_prompt = true
# "hide_gpt-5.1-codex-max_migration_prompt" = true
# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }

################################################################################
# Centralized Feature Flags (preferred)

[features]
# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
# shell_tool = true
# apps = false
# codex_git_commit = false
# codex_hooks = false
# unified_exec = true
# shell_snapshot = true
# multi_agent = true
# personality = true
# fast_mode = true
# enable_request_compression = true
# skill_mcp_dependency_install = true
# prevent_idle_sleep = false

################################################################################
# Memories (table)
################################################################################

# Enable memories with [features].memories, then tune memory behavior here.
# [memories]
# generate_memories = true
# use_memories = true
# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search

################################################################################
# Lifecycle hooks can be configured here inline or in a sibling hooks.json.
################################################################################

# [hooks]
# [[hooks.PreToolUse]]
# matcher = "^Bash$"
#
# [[hooks.PreToolUse.hooks]]
# type = "command"
# command = 'python3 "/absolute/path/to/pre_tool_use_policy.py"'
# timeout = 30
# statusMessage = "Checking Bash command"

################################################################################
# Define MCP servers under this table. Leave empty to disable.
# command = "docs-server" # required
# args = ["--port", "4000"] # optional
# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is
# env_vars = ["ANOTHER_SECRET"] # optional: forward local parent env vars
# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]
# cwd = "/path/to/server" # optional working directory override
# experimental_environment = "remote" # experimental: run stdio via a remote executor
# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
# enabled_tools = ["search", "summarize"] # optional allow-list
# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)
# scopes = ["read:docs"] # optional OAuth scopes
# oauth_resource = "https://docs.example.com/" # optional OAuth resource

# --- Example: Streamable HTTP transport ---
# [mcp_servers.github]
# startup_timeout_sec = 10.0 # optional
# tool_timeout_sec = 60.0 # optional
# enabled_tools = ["list_issues"] # optional allow-list
# disabled_tools = ["delete_issue"] # optional deny-list
# scopes = ["repo"] # optional OAuth scopes

################################################################################
# Model Providers
################################################################################

# Built-ins include:
# - openai
# - ollama
# - lmstudio
# - amazon-bedrock
# These IDs are reserved. Use a different ID for custom providers.

[model_providers]

# --- Example: built-in Amazon Bedrock provider options ---
# model_provider = "amazon-bedrock"
# model = "<bedrock-model-id>"
# [model_providers.amazon-bedrock.aws]
# profile = "default"
# region = "eu-central-1"

# --- Example: OpenAI data residency with explicit base URL or headers ---
# [model_providers.openaidr]
# name = "OpenAI Data Residency"
# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
# wire_api = "responses" # only supported value
# # requires_openai_auth = true # use only for providers backed by OpenAI auth
# # request_max_retries = 4 # default 4; max 100
# # stream_max_retries = 5 # default 5; max 100
# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
# # supports_websockets = true # optional
# # experimental_bearer_token = "sk-example" # optional dev-only direct bearer token
# # http_headers = { "X-Example" = "value" }
# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }

# --- Example: Azure/OpenAI-compatible provider ---
# [model_providers.azure]
# name = "Azure"
# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
# wire_api = "responses"
# query_params = { api-version = "2025-04-01-preview" }
# env_key = "AZURE_OPENAI_API_KEY"
# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
# # supports_websockets = false

# --- Example: command-backed bearer token auth ---
# [model_providers.proxy]
# name = "OpenAI using LLM proxy"
# base_url = "https://proxy.example.com/v1"
# wire_api = "responses"
#
# [model_providers.proxy.auth]
# command = "/usr/local/bin/fetch-codex-token"
# args = ["--audience", "codex"]
# timeout_ms = 5000
# refresh_interval_ms = 300000

# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
# name = "Ollama"
# base_url = "http://localhost:11434/v1"
# wire_api = "responses"

################################################################################
# Apps / Connectors
################################################################################

# Optional per-app controls.
[apps]
# [_default] applies to all apps unless overridden per app.
# [apps._default]
# enabled = true
# destructive_enabled = true
# open_world_enabled = true
#
# [apps.google_drive]
# enabled = false
# destructive_enabled = false # block destructive-hint tools for this app
# default_tools_enabled = true
# default_tools_approval_mode = "prompt" # auto | prompt | approve
#
# [apps.google_drive.tools."files/delete"]
# enabled = false
# approval_mode = "approve"

# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
# [tool_suggest]
# discoverables = [
#   { type = "connector", id = "gmail" },
#   { type = "plugin", id = "figma@openai-curated" },
# ]
# disabled_tools = [
#   { type = "plugin", id = "slack@openai-curated" },
#   { type = "connector", id = "connector_googlecalendar" },
# ]
394 571
################################################################################
# Profiles (named presets)
################################################################################

[profiles]

# [profiles.default]
# model = "gpt-5.4"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"
# service_tier = "flex"
# oss_provider = "ollama"
# model_reasoning_effort = "medium"
# plan_mode_reasoning_effort = "high"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# personality = "pragmatic" # or "friendly" or "none"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
# model_catalog_json = "./models.json"
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# experimental_compact_prompt_file = "./compact_prompt.txt"
# tools_view_image = true
# features = { unified_exec = false }

################################################################################
# Projects (trust levels)
################################################################################

[projects]
# Mark specific worktrees as trusted or untrusted.
# [projects."/absolute/path/to/project"]
# trust_level = "trusted" # or "untrusted"

################################################################################
# Tools
################################################################################

[tools]
# view_image = true

################################################################################
# OpenTelemetry (OTEL) - disabled by default
################################################################################
exporter = "none"
# Trace exporter: none (default) | otlp-http | otlp-grpc
trace_exporter = "none"
# Metrics exporter: none | statsig | otlp-http | otlp-grpc
metrics_exporter = "statsig"

# Example OTLP/HTTP exporter configuration
# [otel.exporter."otlp-http"]
# [otel.exporter."otlp-http".headers]
# "x-otlp-api-key" = "${OTLP_TOKEN}"

# [otel.exporter."otlp-http".tls]
# ca-certificate = "certs/otel-ca.pem"
# client-certificate = "/etc/codex/certs/client.pem"
# client-private-key = "/etc/codex/certs/client-key.pem"

# Example OTLP/gRPC trace exporter configuration
# [otel.trace_exporter."otlp-grpc"]
# endpoint = "https://otel.example.com:4317"
# headers = { "x-otlp-meta" = "abc123" }

################################################################################
# Windows
################################################################################

[windows]
# Native Windows sandbox mode (Windows only): unelevated | elevated
sandbox = "unelevated"
```