config-sample.md +247 −128
1# Sample Configuration1# Sample Configuration
2 2
33Use this example configuration as a starting point. It includes most keys Codex reads from `config.toml`, along with defaults and short notes.Use this example configuration as a starting point. It includes most keys Codex reads from `config.toml`, along with default behaviors, recommended values where helpful, and short notes.
4 4
5For explanations and guidance, see:5For explanations and guidance, see:
6 6
7- [Config basics](https://developers.openai.com/codex/config-basic)7- [Config basics](https://developers.openai.com/codex/config-basic)
8- [Advanced Config](https://developers.openai.com/codex/config-advanced)8- [Advanced Config](https://developers.openai.com/codex/config-advanced)
9- [Config Reference](https://developers.openai.com/codex/config-reference)9- [Config Reference](https://developers.openai.com/codex/config-reference)
1010- [Sandbox and approvals](https://developers.openai.com/codex/security#sandbox-and-approvals)- [Sandbox and approvals](https://developers.openai.com/codex/agent-approvals-security#sandbox-and-approvals)
1111- [Managed configuration](https://developers.openai.com/codex/security#managed-configuration)- [Managed configuration](https://developers.openai.com/codex/enterprise/managed-configuration)
12 12
13Use the snippet below as a reference. Copy only the keys and sections you need into `~/.codex/config.toml` (or into a project-scoped `.codex/config.toml`), then adjust values for your setup.13Use the snippet below as a reference. Copy only the keys and sections you need into `~/.codex/config.toml` (or into a project-scoped `.codex/config.toml`), then adjust values for your setup.
14 14
15```toml15```toml
16# Codex example configuration (config.toml)16# Codex example configuration (config.toml)
17#17#
1818# This file lists all keys Codex reads from config.toml, their default values,# This file lists the main keys Codex reads from config.toml, along with default
1919# and concise explanations. Values here mirror the effective defaults compiled# behaviors, recommended examples, and concise explanations. Adjust as needed.
20# into the CLI. Adjust as needed.
21#20#
22# Notes21# Notes
23# - Root keys must appear before tables in TOML.22# - Root keys must appear before tables in TOML.
28# Core Model Selection27# Core Model Selection
29################################################################################28################################################################################
30 29
3130# Primary model used by Codex. Default: "gpt-5.2-codex" on all platforms.# Primary model used by Codex. Recommended example for most users: "gpt-5.5".
3231model = "gpt-5.2-codex"model = "gpt-5.5"
33 32
3433# Default communication style for supported models. Default: "friendly".# Communication style for supported models. Allowed values: none | friendly | pragmatic
3534# Allowed values: none | friendly | pragmatic# personality = "pragmatic"
36# personality = "friendly"
37 35
38# Optional model override for /review. Default: unset (uses current session model).36# Optional model override for /review. Default: unset (uses current session model).
3937# review_model = "gpt-5.2-codex"# review_model = "gpt-5.5"
40 38
41# Provider id selected from [model_providers]. Default: "openai".39# Provider id selected from [model_providers]. Default: "openai".
42model_provider = "openai"40model_provider = "openai"
44# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.42# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.
45# oss_provider = "ollama"43# oss_provider = "ollama"
46 44
4745# Optional manual model metadata. When unset, Codex auto-detects from model.# Preferred service tier. `fast` is honored only when enabled in [features].
4846# Uncomment to force values.# service_tier = "flex" # fast | flex
47
48# Optional manual model metadata. When unset, Codex uses model or preset defaults.
49# model_context_window = 128000 # tokens; default: auto for model49# model_context_window = 128000 # tokens; default: auto for model
5050# model_auto_compact_token_limit = 0 # tokens; unset uses model defaults# model_auto_compact_token_limit = 64000 # tokens; unset uses model defaults
5151# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for gpt-5.2-codex# tool_output_token_limit = 12000 # tokens stored per tool output
52# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override52# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override
53# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)53# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)
54# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"54# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"
55# sqlite_home = "/absolute/path/to/codex-state" # optional SQLite-backed runtime state directory
55 56
56################################################################################57################################################################################
57# Reasoning & Verbosity (Responses API capable models)58# Reasoning & Verbosity (Responses API capable models)
58################################################################################59################################################################################
59 60
6061# Reasoning effort: minimal | low | medium | high | xhigh (default: medium; xhigh on gpt-5.2-codex and gpt-5.2)# Reasoning effort: minimal | low | medium | high | xhigh
6162model_reasoning_effort = "medium"# model_reasoning_effort = "medium"
63
64# Optional override used when Codex runs in plan mode: none | minimal | low | medium | high | xhigh
65# plan_mode_reasoning_effort = "high"
62 66
6367# Reasoning summary: auto | concise | detailed | none (default: auto)# Reasoning summary: auto | concise | detailed | none
64# model_reasoning_summary = "auto"68# model_reasoning_summary = "auto"
65 69
6670# Text verbosity for GPT-5 family (Responses API): low | medium | high (default: medium)# Text verbosity for GPT-5 family (Responses API): low | medium | high
67# model_verbosity = "medium"71# model_verbosity = "medium"
68 72
6973# Force enable or disable reasoning summaries for current model# Force enable or disable reasoning summaries for current model.
70# model_supports_reasoning_summaries = true74# model_supports_reasoning_summaries = true
71 75
72################################################################################76################################################################################
76# Additional user instructions are injected before AGENTS.md. Default: unset.80# Additional user instructions are injected before AGENTS.md. Default: unset.
77# developer_instructions = ""81# developer_instructions = ""
78 82
79# (Ignored) Optional legacy base instructions override (prefer AGENTS.md). Default: unset.
80# instructions = ""
81
82# Inline override for the history compaction prompt. Default: unset.83# Inline override for the history compaction prompt. Default: unset.
83# compact_prompt = ""84# compact_prompt = ""
84 85
86# Override the default commit co-author trailer. Set to "" to disable it.
87# commit_attribution = "Jane Doe <jane@example.com>"
88
85# Override built-in base instructions with a file path. Default: unset.89# Override built-in base instructions with a file path. Default: unset.
86# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"90# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
87 91
88# Migration note: experimental_instructions_file was renamed to model_instructions_file (deprecated).
89
90# Load the compact prompt override from a file. Default: unset.92# Load the compact prompt override from a file. Default: unset.
91# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"93# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"
92 94
93# Legacy name for apply_patch_freeform. Default: false
94include_apply_patch_tool = false
95
96################################################################################95################################################################################
97# Notifications96# Notifications
98################################################################################97################################################################################
99 98
100# External notifier program (argv array). When unset: disabled.99# External notifier program (argv array). When unset: disabled.
101100# Example: notify = ["notify-send", "Codex"]# notify = ["notify-send", "Codex"]
102notify = [ ]
103 101
104################################################################################102################################################################################
105# Approval & Sandbox103# Approval & Sandbox
109# - untrusted: only known-safe read-only commands auto-run; others prompt107# - untrusted: only known-safe read-only commands auto-run; others prompt
110# - on-request: model decides when to ask (default)108# - on-request: model decides when to ask (default)
111# - never: never prompt (risky)109# - never: never prompt (risky)
112110# - { reject = { ... } }: auto-reject selected prompt categories# - { granular = { ... } }: allow or auto-reject selected prompt categories
113approval_policy = "on-request"111approval_policy = "on-request"
114112# Example granular auto-reject policy:# Who reviews eligible approval prompts: user (default) | auto_review
115113# approval_policy = { reject = { sandbox_approval = true, rules = false, mcp_elicitations = false } }# approvals_reviewer = "user"
114
115# Example granular policy:
116# approval_policy = { granular = {
117# sandbox_approval = true,
118# rules = true,
119# mcp_elicitations = true,
120# request_permissions = false,
121# skill_approval = false
122# } }
116 123
117# Allow login-shell semantics for shell-based tools when they request `login = true`.124# Allow login-shell semantics for shell-based tools when they request `login = true`.
118# Default: true. Set false to force non-login shells and reject explicit login-shell requests.125# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
123# - workspace-write130# - workspace-write
124# - danger-full-access (no sandbox; extremely risky)131# - danger-full-access (no sandbox; extremely risky)
125sandbox_mode = "read-only"132sandbox_mode = "read-only"
133# Named permissions profile to apply by default. Built-ins:
134# :read-only | :workspace | :danger-no-sandbox
135# Use a custom name such as "workspace" only when you also define [permissions.workspace].
136# default_permissions = ":workspace"
137
138# Example filesystem profile. Use `"none"` to deny reads for exact paths or
139# glob patterns. On platforms that need pre-expanded glob matches, set
140# glob_scan_max_depth when using unbounded patterns such as `**`.
141# [permissions.workspace.filesystem]
142# glob_scan_max_depth = 3
143# ":project_roots" = { "." = "write", "**/*.env" = "none" }
144# "/absolute/path/to/secrets" = "none"
126 145
127################################################################################146################################################################################
128# Authentication & Login147# Authentication & Login
131# Where to persist CLI login credentials: file (default) | keyring | auto150# Where to persist CLI login credentials: file (default) | keyring | auto
132cli_auth_credentials_store = "file"151cli_auth_credentials_store = "file"
133 152
134153# Base URL for ChatGPT auth flow (not OpenAI API). Default:# Base URL for ChatGPT auth flow (not OpenAI API).
135chatgpt_base_url = "https://chatgpt.com/backend-api/"154chatgpt_base_url = "https://chatgpt.com/backend-api/"
136 155
156# Optional base URL override for the built-in OpenAI provider.
157# openai_base_url = "https://us.api.openai.com/v1"
158
137# Restrict ChatGPT login to a specific workspace id. Default: unset.159# Restrict ChatGPT login to a specific workspace id. Default: unset.
138160# forced_chatgpt_workspace_id = ""# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"
139 161
140# Force login mechanism when Codex would normally auto-select. Default: unset.162# Force login mechanism when Codex would normally auto-select. Default: unset.
141# Allowed values: chatgpt | api163# Allowed values: chatgpt | api
200# If you use --yolo or another full access sandbox setting, web search defaults to live.220# If you use --yolo or another full access sandbox setting, web search defaults to live.
201web_search = "cached"221web_search = "cached"
202 222
203################################################################################
204# Profiles (named presets)
205################################################################################
206
207# Active profile name. When unset, no profile is applied.223# Active profile name. When unset, no profile is applied.
208# profile = "default"224# profile = "default"
209 225
226# Suppress the warning shown when under-development feature flags are enabled.
227# suppress_unstable_features_warning = true
228
210################################################################################229################################################################################
211# Agents (multi-agent roles and limits)230# Agents (multi-agent roles and limits)
212################################################################################231################################################################################
213 232
214233# [agents][agents]
215# Maximum concurrently open agent threads. Default: 6234# Maximum concurrently open agent threads. Default: 6
216# max_threads = 6235# max_threads = 6
217# Maximum nested spawn depth. Root session starts at depth 0. Default: 1236# Maximum nested spawn depth. Root session starts at depth 0. Default: 1
218# max_depth = 1237# max_depth = 1
238# Default timeout per worker for spawn_agents_on_csv jobs. When unset, the tool defaults to 1800 seconds.
239# job_max_runtime_seconds = 1800
219 240
220# [agents.reviewer]241# [agents.reviewer]
221242# description = "Find security, correctness, and test risks in code."# description = "Find correctness, security, and test risks in code."
222# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it243# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it
244# nickname_candidates = ["Athena", "Ada"]
223 245
224################################################################################246################################################################################
225# Skills (per-skill overrides)247# Skills (per-skill overrides)
227 249
228# Disable or re-enable a specific skill without deleting it.250# Disable or re-enable a specific skill without deleting it.
229[[skills.config]]251[[skills.config]]
230252# path = "/path/to/skill"# path = "/path/to/skill/SKILL.md"
231# enabled = false253# enabled = false
232 254
233################################################################################
234# Experimental toggles (legacy; prefer [features])
235################################################################################
236
237experimental_use_unified_exec_tool = false
238
239# Include apply_patch via freeform editing path (affects default tool set). Default: false
240experimental_use_freeform_apply_patch = false
241
242################################################################################255################################################################################
243# Sandbox settings (tables)256# Sandbox settings (tables)
244################################################################################257################################################################################
261[shell_environment_policy]274[shell_environment_policy]
262# inherit: all (default) | core | none275# inherit: all (default) | core | none
263inherit = "all"276inherit = "all"
264277# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: true# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: false
265278ignore_default_excludes = trueignore_default_excludes = false
266# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []279# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
267exclude = []280exclude = []
268# Explicit key/value overrides (always win). Default: {}281# Explicit key/value overrides (always win). Default: {}
272# Experimental: run via user shell profile. Default: false285# Experimental: run via user shell profile. Default: false
273experimental_use_profile = false286experimental_use_profile = false
274 287
288################################################################################
289# Managed network proxy settings
290################################################################################
291
292# Set `default_permissions = "workspace"` before enabling this profile.
293# [permissions.workspace.network]
294# enabled = true
295# proxy_url = "http://127.0.0.1:43128"
296# admin_url = "http://127.0.0.1:43129"
297# enable_socks5 = false
298# socks_url = "http://127.0.0.1:43130"
299# enable_socks5_udp = false
300# allow_upstream_proxy = false
301# dangerously_allow_non_loopback_proxy = false
302# dangerously_allow_non_loopback_admin = false
303# dangerously_allow_all_unix_sockets = false
304# mode = "limited" # limited | full
305# allow_local_binding = false
306#
307# [permissions.workspace.network.domains]
308# "api.openai.com" = "allow"
309# "example.com" = "deny"
310#
311# [permissions.workspace.network.unix_sockets]
312# "/var/run/docker.sock" = "allow"
313
275################################################################################314################################################################################
276# History (table)315# History (table)
277################################################################################316################################################################################
280# save-all (default) | none319# save-all (default) | none
281persistence = "save-all"320persistence = "save-all"
282# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880321# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880
283322# max_bytes = 0# max_bytes = 5242880
284 323
285################################################################################324################################################################################
286# UI, Notifications, and Misc (tables)325# UI, Notifications, and Misc (tables)
294# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"333# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"
295# notification_method = "auto"334# notification_method = "auto"
296 335
336# When notifications fire: unfocused (default) | always
337# notification_condition = "unfocused"
338
297# Enables welcome/status/spinner animations. Default: true339# Enables welcome/status/spinner animations. Default: true
298animations = true340animations = true
299 341
308# Set to [] to hide the footer.350# Set to [] to hide the footer.
309# status_line = ["model", "context-remaining", "git-branch"]351# status_line = ["model", "context-remaining", "git-branch"]
310 352
353# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
354# ["spinner", "project"]. Set to [] to clear the title.
355# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
356# and task-progress.
357# terminal_title = ["spinner", "project"]
358
311# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.359# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
312# You can also add custom .tmTheme files under $CODEX_HOME/themes.360# You can also add custom .tmTheme files under $CODEX_HOME/themes.
313# theme = "catppuccin-mocha"361# theme = "catppuccin-mocha"
314 362
363# Custom key bindings. Context-specific bindings override [tui.keymap.global].
364# Use [] to unbind an action.
365# [tui.keymap.global]
366# open_transcript = "ctrl-t"
367# open_external_editor = []
368#
369# [tui.keymap.composer]
370# submit = ["enter", "ctrl-m"]
371
372# Internal tooltip state keyed by model slug. Usually managed by Codex.
373# [tui.model_availability_nux]
374# "gpt-5.4" = 1
375
376# Enable or disable analytics for this machine. When unset, Codex uses its default behavior.
377[analytics]
378enabled = true
379
315# Control whether users can submit feedback from `/feedback`. Default: true380# Control whether users can submit feedback from `/feedback`. Default: true
316[feedback]381[feedback]
317enabled = true382enabled = true
323# hide_rate_limit_model_nudge = true388# hide_rate_limit_model_nudge = true
324# hide_gpt5_1_migration_prompt = true389# hide_gpt5_1_migration_prompt = true
325# "hide_gpt-5.1-codex-max_migration_prompt" = true390# "hide_gpt-5.1-codex-max_migration_prompt" = true
326391# model_migrations = { "gpt-4.1" = "gpt-5.1" }# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }
327
328# Suppress the warning shown when under-development feature flags are enabled.
329# suppress_unstable_features_warning = true
330 392
331################################################################################393################################################################################
332# Centralized Feature Flags (preferred)394# Centralized Feature Flags (preferred)
336# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.398# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
337# shell_tool = true399# shell_tool = true
338# apps = false400# apps = false
339401# apps_mcp_gateway = false# codex_hooks = false
340402# web_search_cached = false# unified_exec = true
341403# web_search_request = false# shell_snapshot = true
342404# unified_exec = false# multi_agent = true
343# shell_snapshot = false
344# apply_patch_freeform = false
345# multi_agent = false
346# search_tool = false
347# personality = true405# personality = true
348406# request_rule = true# fast_mode = true
349407# collaboration_modes = true# enable_request_compression = true
350408# use_linux_sandbox_bwrap = false# skill_mcp_dependency_install = true
351409# remote_models = false# prevent_idle_sleep = false
352410# runtime_metrics = false
353411# powershell_utf8 = true################################################################################
354412# child_agents_md = false# Memories (table)
413################################################################################
414
415# Enable memories with [features].memories, then tune memory behavior here.
416# [memories]
417# generate_memories = true
418# use_memories = true
419# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search
420
421################################################################################
422# Lifecycle hooks can be configured here inline or in a sibling hooks.json.
423################################################################################
424
425# [hooks]
426# [[hooks.PreToolUse]]
427# matcher = "^Bash$"
428#
429# [[hooks.PreToolUse.hooks]]
430# type = "command"
431# command = 'python3 "/absolute/path/to/pre_tool_use_policy.py"'
432# timeout = 30
433# statusMessage = "Checking Bash command"
355 434
356################################################################################435################################################################################
357# Define MCP servers under this table. Leave empty to disable.436# Define MCP servers under this table. Leave empty to disable.
366# command = "docs-server" # required445# command = "docs-server" # required
367# args = ["--port", "4000"] # optional446# args = ["--port", "4000"] # optional
368# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is447# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is
369448# env_vars = ["ANOTHER_SECRET"] # optional: forward these from the parent env# env_vars = ["ANOTHER_SECRET"] # optional: forward local parent env vars
449# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]
370# cwd = "/path/to/server" # optional working directory override450# cwd = "/path/to/server" # optional working directory override
451# experimental_environment = "remote" # experimental: run stdio via a remote executor
371# startup_timeout_sec = 10.0 # optional; default 10.0 seconds452# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
372# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)453# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
373# tool_timeout_sec = 60.0 # optional; default 60.0 seconds454# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
374# enabled_tools = ["search", "summarize"] # optional allow-list455# enabled_tools = ["search", "summarize"] # optional allow-list
375# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)456# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)
457# scopes = ["read:docs"] # optional OAuth scopes
458# oauth_resource = "https://docs.example.com/" # optional OAuth resource
376 459
377# --- Example: Streamable HTTP transport ---460# --- Example: Streamable HTTP transport ---
378# [mcp_servers.github]461# [mcp_servers.github]
385# startup_timeout_sec = 10.0 # optional468# startup_timeout_sec = 10.0 # optional
386# tool_timeout_sec = 60.0 # optional469# tool_timeout_sec = 60.0 # optional
387# enabled_tools = ["list_issues"] # optional allow-list470# enabled_tools = ["list_issues"] # optional allow-list
471# disabled_tools = ["delete_issue"] # optional deny-list
472# scopes = ["repo"] # optional OAuth scopes
388 473
389################################################################################474################################################################################
390# Model Providers475# Model Providers
391################################################################################476################################################################################
392 477
393# Built-ins include:478# Built-ins include:
394479# - openai (Responses API; requires login or OPENAI_API_KEY via auth flow)# - openai
395480# - oss (Chat Completions API; defaults to http://localhost:11434/v1)# - ollama
481# - lmstudio
482# - amazon-bedrock
483# These IDs are reserved. Use a different ID for custom providers.
396 484
397[model_providers]485[model_providers]
398 486
487# --- Example: built-in Amazon Bedrock provider options ---
488# model_provider = "amazon-bedrock"
489# model = "<bedrock-model-id>"
490# [model_providers.amazon-bedrock.aws]
491# profile = "default"
492# region = "eu-central-1"
493
399# --- Example: OpenAI data residency with explicit base URL or headers ---494# --- Example: OpenAI data residency with explicit base URL or headers ---
400# [model_providers.openaidr]495# [model_providers.openaidr]
401# name = "OpenAI Data Residency"496# name = "OpenAI Data Residency"
402# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix497# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
403498# wire_api = "responses" # "responses" | "chat" (default varies)# wire_api = "responses" # only supported value
404499# # requires_openai_auth = true # built-in OpenAI defaults to true# # requires_openai_auth = true # use only for providers backed by OpenAI auth
405# # request_max_retries = 4 # default 4; max 100500# # request_max_retries = 4 # default 4; max 100
406# # stream_max_retries = 5 # default 5; max 100501# # stream_max_retries = 5 # default 5; max 100
407# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)502# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
503# # supports_websockets = true # optional
408# # experimental_bearer_token = "sk-example" # optional dev-only direct bearer token504# # experimental_bearer_token = "sk-example" # optional dev-only direct bearer token
409# # http_headers = { "X-Example" = "value" }505# # http_headers = { "X-Example" = "value" }
410# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }506# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }
411 507
412508# --- Example: Azure (Chat/Responses depending on endpoint) ---# --- Example: Azure/OpenAI-compatible provider ---
413# [model_providers.azure]509# [model_providers.azure]
414# name = "Azure"510# name = "Azure"
415# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"511# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
416512# wire_api = "responses" # or "chat" per endpoint# wire_api = "responses"
417# query_params = { api-version = "2025-04-01-preview" }513# query_params = { api-version = "2025-04-01-preview" }
418# env_key = "AZURE_OPENAI_API_KEY"514# env_key = "AZURE_OPENAI_API_KEY"
419515# # env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
516# # supports_websockets = false
517
518# --- Example: command-backed bearer token auth ---
519# [model_providers.proxy]
520# name = "OpenAI using LLM proxy"
521# base_url = "https://proxy.example.com/v1"
522# wire_api = "responses"
523#
524# [model_providers.proxy.auth]
525# command = "/usr/local/bin/fetch-codex-token"
526# args = ["--audience", "codex"]
527# timeout_ms = 5000
528# refresh_interval_ms = 300000
420 529
421# --- Example: Local OSS (e.g., Ollama-compatible) ---530# --- Example: Local OSS (e.g., Ollama-compatible) ---
422531# [model_providers.ollama]# [model_providers.local_ollama]
423# name = "Ollama"532# name = "Ollama"
424# base_url = "http://localhost:11434/v1"533# base_url = "http://localhost:11434/v1"
425534# wire_api = "chat"# wire_api = "responses"
426
427################################################################################
428# Profiles (named presets)
429################################################################################
430
431[profiles]
432
433# [profiles.default]
434# model = "gpt-5.2-codex"
435# model_provider = "openai"
436# approval_policy = "on-request"
437# sandbox_mode = "read-only"
438# oss_provider = "ollama"
439# model_reasoning_effort = "medium"
440# model_reasoning_summary = "auto"
441# model_verbosity = "medium"
442# personality = "friendly" # or "pragmatic" or "none"
443# chatgpt_base_url = "https://chatgpt.com/backend-api/"
444# model_catalog_json = "./models.json"
445# experimental_compact_prompt_file = "./compact_prompt.txt"
446# include_apply_patch_tool = false
447# experimental_use_unified_exec_tool = false
448# experimental_use_freeform_apply_patch = false
449# tools.web_search = false # deprecated legacy alias; prefer top-level `web_search`
450# features = { unified_exec = false }
451 535
452################################################################################536################################################################################
453# Apps / Connectors537# Apps / Connectors
471# enabled = false555# enabled = false
472# approval_mode = "approve"556# approval_mode = "approve"
473 557
# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
# [tool_suggest]
# discoverables = [
#   { type = "connector", id = "gmail" },
#   { type = "plugin", id = "figma@openai-curated" },
# ]
# disabled_tools = [
#   { type = "plugin", id = "slack@openai-curated" },
#   { type = "connector", id = "connector_googlecalendar" },
# ]

################################################################################
# Profiles (named presets)
################################################################################

[profiles]

# [profiles.default]
# model = "gpt-5.4"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"
# service_tier = "flex"
# oss_provider = "ollama"
# model_reasoning_effort = "medium"
# plan_mode_reasoning_effort = "high"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# personality = "pragmatic" # or "friendly" or "none"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
# model_catalog_json = "./models.json"
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# experimental_compact_prompt_file = "./compact_prompt.txt"
# tools_view_image = true
# features = { unified_exec = false }

################################################################################
# Projects (trust levels)
################################################################################

[projects]
# Mark specific worktrees as trusted or untrusted.
# [projects."/absolute/path/to/project"]
# trust_level = "trusted" # or "untrusted"

################################################################################
# Tools
################################################################################

[tools]
# view_image = true

################################################################################
# OpenTelemetry (OTEL) - disabled by default
################################################################################
exporter = "none"
# Trace exporter: none (default) | otlp-http | otlp-grpc
trace_exporter = "none"
# Metrics exporter: none | statsig | otlp-http | otlp-grpc
metrics_exporter = "statsig"

# Example OTLP/HTTP exporter configuration
# [otel.exporter."otlp-http"]
# [otel.exporter."otlp-http".headers]
# "x-otlp-api-key" = "${OTLP_TOKEN}"

# [otel.exporter."otlp-http".tls]
# ca-certificate = "certs/otel-ca.pem"
# client-certificate = "/etc/codex/certs/client.pem"
# client-private-key = "/etc/codex/certs/client-key.pem"

524639################################################################################# Example OTLP/gRPC trace exporter configuration
640# [otel.trace_exporter."otlp-grpc"]
641# endpoint = "https://otel.example.com:4317"
642# headers = { "x-otlp-meta" = "abc123" }
525 643
644################################################################################
526# Windows645# Windows
527
528################################################################################646################################################################################
529 647
530[windows]648[windows]
531
532# Native Windows sandbox mode (Windows only): unelevated | elevated649# Native Windows sandbox mode (Windows only): unelevated | elevated
533
534sandbox = "unelevated"650sandbox = "unelevated"
651```