SpyBara
Go Premium Account
2026
4 Mar 2026, 06:20
14 May 2026, 21:00 14 May 2026, 07:00 13 May 2026, 00:57 12 May 2026, 01:59 11 May 2026, 18:00 7 May 2026, 20:02 7 May 2026, 17:08 5 May 2026, 23:00 2 May 2026, 06:45 2 May 2026, 00:48 1 May 2026, 18:29 30 Apr 2026, 18:36 29 Apr 2026, 12:40 29 Apr 2026, 00:50 25 Apr 2026, 06:37 25 Apr 2026, 00:42 24 Apr 2026, 18:20 24 Apr 2026, 12:28 23 Apr 2026, 18:31 23 Apr 2026, 12:28 23 Apr 2026, 00:46 22 Apr 2026, 18:29 22 Apr 2026, 00:42 21 Apr 2026, 18:29 21 Apr 2026, 12:30 21 Apr 2026, 06:45 20 Apr 2026, 18:26 20 Apr 2026, 06:53 18 Apr 2026, 18:18 17 Apr 2026, 00:44 16 Apr 2026, 18:31 16 Apr 2026, 00:46 15 Apr 2026, 18:31 15 Apr 2026, 06:44 14 Apr 2026, 18:31 14 Apr 2026, 12:29 13 Apr 2026, 18:37 13 Apr 2026, 00:44 12 Apr 2026, 06:38 10 Apr 2026, 18:23 9 Apr 2026, 00:33 8 Apr 2026, 18:32 8 Apr 2026, 00:40 7 Apr 2026, 00:40 2 Apr 2026, 18:23 31 Mar 2026, 06:35 31 Mar 2026, 00:39 28 Mar 2026, 06:26 28 Mar 2026, 00:36 27 Mar 2026, 18:23 27 Mar 2026, 00:39 26 Mar 2026, 18:27 25 Mar 2026, 18:24 23 Mar 2026, 18:22 20 Mar 2026, 00:35 18 Mar 2026, 12:23 18 Mar 2026, 00:36 17 Mar 2026, 18:24 17 Mar 2026, 00:33 16 Mar 2026, 18:25 16 Mar 2026, 12:23 14 Mar 2026, 00:32 13 Mar 2026, 18:15 13 Mar 2026, 00:34 11 Mar 2026, 00:31 9 Mar 2026, 00:34 8 Mar 2026, 18:10 8 Mar 2026, 00:35 7 Mar 2026, 18:10 7 Mar 2026, 06:14 7 Mar 2026, 00:33 6 Mar 2026, 00:38 5 Mar 2026, 18:41 5 Mar 2026, 06:22 5 Mar 2026, 00:34 4 Mar 2026, 18:18 4 Mar 2026, 06:20 3 Mar 2026, 18:20 3 Mar 2026, 00:35 27 Feb 2026, 18:15 24 Feb 2026, 06:27 24 Feb 2026, 00:33 23 Feb 2026, 18:27 21 Feb 2026, 00:33 20 Feb 2026, 12:16 19 Feb 2026, 20:53 19 Feb 2026, 20:37
30 Apr 2026, 18:36
14 May 2026, 21:00 14 May 2026, 07:00 13 May 2026, 00:57 12 May 2026, 01:59 11 May 2026, 18:00 7 May 2026, 20:02 7 May 2026, 17:08 5 May 2026, 23:00 2 May 2026, 06:45 2 May 2026, 00:48 1 May 2026, 18:29 30 Apr 2026, 18:36 29 Apr 2026, 12:40 29 Apr 2026, 00:50 25 Apr 2026, 06:37 25 Apr 2026, 00:42 24 Apr 2026, 18:20 24 Apr 2026, 12:28 23 Apr 2026, 18:31 23 Apr 2026, 12:28 23 Apr 2026, 00:46 22 Apr 2026, 18:29 22 Apr 2026, 00:42 21 Apr 2026, 18:29 21 Apr 2026, 12:30 21 Apr 2026, 06:45 20 Apr 2026, 18:26 20 Apr 2026, 06:53 18 Apr 2026, 18:18 17 Apr 2026, 00:44 16 Apr 2026, 18:31 16 Apr 2026, 00:46 15 Apr 2026, 18:31 15 Apr 2026, 06:44 14 Apr 2026, 18:31 14 Apr 2026, 12:29 13 Apr 2026, 18:37 13 Apr 2026, 00:44 12 Apr 2026, 06:38 10 Apr 2026, 18:23 9 Apr 2026, 00:33 8 Apr 2026, 18:32 8 Apr 2026, 00:40 7 Apr 2026, 00:40 2 Apr 2026, 18:23 31 Mar 2026, 06:35 31 Mar 2026, 00:39 28 Mar 2026, 06:26 28 Mar 2026, 00:36 27 Mar 2026, 18:23 27 Mar 2026, 00:39 26 Mar 2026, 18:27 25 Mar 2026, 18:24 23 Mar 2026, 18:22 20 Mar 2026, 00:35 18 Mar 2026, 12:23 18 Mar 2026, 00:36 17 Mar 2026, 18:24 17 Mar 2026, 00:33 16 Mar 2026, 18:25 16 Mar 2026, 12:23 14 Mar 2026, 00:32 13 Mar 2026, 18:15 13 Mar 2026, 00:34 11 Mar 2026, 00:31 9 Mar 2026, 00:34 8 Mar 2026, 18:10 8 Mar 2026, 00:35 7 Mar 2026, 18:10 7 Mar 2026, 06:14 7 Mar 2026, 00:33 6 Mar 2026, 00:38 5 Mar 2026, 18:41 5 Mar 2026, 06:22 5 Mar 2026, 00:34 4 Mar 2026, 18:18 4 Mar 2026, 06:20 3 Mar 2026, 18:20 3 Mar 2026, 00:35 27 Feb 2026, 18:15 24 Feb 2026, 06:27 24 Feb 2026, 00:33 23 Feb 2026, 18:27 21 Feb 2026, 00:33 20 Feb 2026, 12:16 19 Feb 2026, 20:53 19 Feb 2026, 20:37
Thu 2 18:23 Tue 7 00:40 Wed 8 00:40 Wed 8 18:32 Thu 9 00:33 Fri 10 18:23 Sun 12 06:38 Mon 13 00:44 Mon 13 18:37 Tue 14 12:29 Tue 14 18:31 Wed 15 06:44 Wed 15 18:31 Thu 16 00:46 Thu 16 18:31 Fri 17 00:44 Sat 18 18:18 Mon 20 06:53 Mon 20 18:26 Tue 21 06:45 Tue 21 12:30 Tue 21 18:29 Wed 22 00:42 Wed 22 18:29 Thu 23 00:46 Thu 23 12:28 Thu 23 18:31 Fri 24 12:28 Fri 24 18:20 Sat 25 00:42 Sat 25 06:37 Wed 29 00:50 Wed 29 12:40 Thu 30 18:36

config-sample.md +243 −127

Details

1# Sample Configuration1# Sample Configuration

2 2 

3Use this example configuration as a starting point. It includes most keys Codex reads from `config.toml`, along with defaults and short notes.3Use this example configuration as a starting point. It includes most keys Codex reads from `config.toml`, along with default behaviors, recommended values where helpful, and short notes.

4 4 

5For explanations and guidance, see:5For explanations and guidance, see:

6 6 

7- [Config basics](https://developers.openai.com/codex/config-basic)7- [Config basics](https://developers.openai.com/codex/config-basic)

8- [Advanced Config](https://developers.openai.com/codex/config-advanced)8- [Advanced Config](https://developers.openai.com/codex/config-advanced)

9- [Config Reference](https://developers.openai.com/codex/config-reference)9- [Config Reference](https://developers.openai.com/codex/config-reference)

10- [Sandbox and approvals](https://developers.openai.com/codex/security#sandbox-and-approvals)10- [Sandbox and approvals](https://developers.openai.com/codex/agent-approvals-security#sandbox-and-approvals)

11- [Managed configuration](https://developers.openai.com/codex/security#managed-configuration)11- [Managed configuration](https://developers.openai.com/codex/enterprise/managed-configuration)

12 12 

13Use the snippet below as a reference. Copy only the keys and sections you need into `~/.codex/config.toml` (or into a project-scoped `.codex/config.toml`), then adjust values for your setup.13Use the snippet below as a reference. Copy only the keys and sections you need into `~/.codex/config.toml` (or into a project-scoped `.codex/config.toml`), then adjust values for your setup.

14 14 

15```toml15```toml

16# Codex example configuration (config.toml)16# Codex example configuration (config.toml)

17#17#

18# This file lists all keys Codex reads from config.toml, their default values,18# This file lists the main keys Codex reads from config.toml, along with default

19# and concise explanations. Values here mirror the effective defaults compiled19# behaviors, recommended examples, and concise explanations. Adjust as needed.

20# into the CLI. Adjust as needed.

21#20#

22# Notes21# Notes

23# - Root keys must appear before tables in TOML.22# - Root keys must appear before tables in TOML.


28# Core Model Selection27# Core Model Selection

29################################################################################28################################################################################

30 29 

31# Primary model used by Codex. Default: "gpt-5.2-codex" on all platforms.30# Primary model used by Codex. Recommended example for most users: "gpt-5.5".

32model = "gpt-5.2-codex"31model = "gpt-5.5"

33 32 

34# Default communication style for supported models. Default: "friendly".33# Communication style for supported models. Allowed values: none | friendly | pragmatic

35# Allowed values: none | friendly | pragmatic34# personality = "pragmatic"

36# personality = "friendly"

37 35 

38# Optional model override for /review. Default: unset (uses current session model).36# Optional model override for /review. Default: unset (uses current session model).

39# review_model = "gpt-5.2-codex"37# review_model = "gpt-5.5"

40 38 

41# Provider id selected from [model_providers]. Default: "openai".39# Provider id selected from [model_providers]. Default: "openai".

42model_provider = "openai"40model_provider = "openai"


44# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.42# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.

45# oss_provider = "ollama"43# oss_provider = "ollama"

46 44 

47# Optional manual model metadata. When unset, Codex auto-detects from model.45# Preferred service tier. `fast` is honored only when enabled in [features].

48# Uncomment to force values.46# service_tier = "flex" # fast | flex

47 

48# Optional manual model metadata. When unset, Codex uses model or preset defaults.

49# model_context_window = 128000 # tokens; default: auto for model49# model_context_window = 128000 # tokens; default: auto for model

50# model_auto_compact_token_limit = 0 # tokens; unset uses model defaults50# model_auto_compact_token_limit = 64000 # tokens; unset uses model defaults

51# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for gpt-5.2-codex51# tool_output_token_limit = 12000 # tokens stored per tool output

52# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override52# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override

53# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)53# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)

54# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"54# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"


58# Reasoning & Verbosity (Responses API capable models)58# Reasoning & Verbosity (Responses API capable models)

59################################################################################59################################################################################

60 60 

61# Reasoning effort: minimal | low | medium | high | xhigh (default: medium; xhigh on gpt-5.2-codex and gpt-5.2)61# Reasoning effort: minimal | low | medium | high | xhigh

62model_reasoning_effort = "medium"62# model_reasoning_effort = "medium"

63 

64# Optional override used when Codex runs in plan mode: none | minimal | low | medium | high | xhigh

65# plan_mode_reasoning_effort = "high"

63 66 

64# Reasoning summary: auto | concise | detailed | none (default: auto)67# Reasoning summary: auto | concise | detailed | none

65# model_reasoning_summary = "auto"68# model_reasoning_summary = "auto"

66 69 

67# Text verbosity for GPT-5 family (Responses API): low | medium | high (default: medium)70# Text verbosity for GPT-5 family (Responses API): low | medium | high

68# model_verbosity = "medium"71# model_verbosity = "medium"

69 72 

70# Force enable or disable reasoning summaries for current model73# Force enable or disable reasoning summaries for current model.

71# model_supports_reasoning_summaries = true74# model_supports_reasoning_summaries = true

72 75 

73################################################################################76################################################################################


77# Additional user instructions are injected before AGENTS.md. Default: unset.80# Additional user instructions are injected before AGENTS.md. Default: unset.

78# developer_instructions = ""81# developer_instructions = ""

79 82 

80# (Ignored) Optional legacy base instructions override (prefer AGENTS.md). Default: unset.

81# instructions = ""

82 

83# Inline override for the history compaction prompt. Default: unset.83# Inline override for the history compaction prompt. Default: unset.

84# compact_prompt = ""84# compact_prompt = ""

85 85 

86# Override the default commit co-author trailer. Set to "" to disable it.

87# commit_attribution = "Jane Doe <jane@example.com>"

88 

86# Override built-in base instructions with a file path. Default: unset.89# Override built-in base instructions with a file path. Default: unset.

87# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"90# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"

88 91 

89# Migration note: experimental_instructions_file was renamed to model_instructions_file (deprecated).

90 

91# Load the compact prompt override from a file. Default: unset.92# Load the compact prompt override from a file. Default: unset.

92# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"93# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"

93 94 

94# Legacy name for apply_patch_freeform. Default: false

95include_apply_patch_tool = false

96 

97################################################################################95################################################################################

98# Notifications96# Notifications

99################################################################################97################################################################################

100 98 

101# External notifier program (argv array). When unset: disabled.99# External notifier program (argv array). When unset: disabled.

102# Example: notify = ["notify-send", "Codex"]100# notify = ["notify-send", "Codex"]

103notify = [ ]

104 101 

105################################################################################102################################################################################

106# Approval & Sandbox103# Approval & Sandbox


110# - untrusted: only known-safe read-only commands auto-run; others prompt107# - untrusted: only known-safe read-only commands auto-run; others prompt

111# - on-request: model decides when to ask (default)108# - on-request: model decides when to ask (default)

112# - never: never prompt (risky)109# - never: never prompt (risky)

113# - { reject = { ... } }: auto-reject selected prompt categories110# - { granular = { ... } }: allow or auto-reject selected prompt categories

114approval_policy = "on-request"111approval_policy = "on-request"

115# Example granular auto-reject policy:112# Who reviews eligible approval prompts: user (default) | auto_review

116# approval_policy = { reject = { sandbox_approval = true, rules = false, mcp_elicitations = false } }113# approvals_reviewer = "user"

114 

115# Example granular policy:

116# approval_policy = { granular = {

117# sandbox_approval = true,

118# rules = true,

119# mcp_elicitations = true,

120# request_permissions = false,

121# skill_approval = false

122# } }

117 123 

118# Allow login-shell semantics for shell-based tools when they request `login = true`.124# Allow login-shell semantics for shell-based tools when they request `login = true`.

119# Default: true. Set false to force non-login shells and reject explicit login-shell requests.125# Default: true. Set false to force non-login shells and reject explicit login-shell requests.


124# - workspace-write130# - workspace-write

125# - danger-full-access (no sandbox; extremely risky)131# - danger-full-access (no sandbox; extremely risky)

126sandbox_mode = "read-only"132sandbox_mode = "read-only"

133# Named permissions profile to apply by default. Built-ins:

134# :read-only | :workspace | :danger-no-sandbox

135# Use a custom name such as "workspace" only when you also define [permissions.workspace].

136# default_permissions = ":workspace"

137 

138# Example filesystem profile. Use `"none"` to deny reads for exact paths or

139# glob patterns. On platforms that need pre-expanded glob matches, set

140# glob_scan_max_depth when using unbounded patterns such as `**`.

141# [permissions.workspace.filesystem]

142# glob_scan_max_depth = 3

143# ":project_roots" = { "." = "write", "**/*.env" = "none" }

144# "/absolute/path/to/secrets" = "none"

127 145 

128################################################################################146################################################################################

129# Authentication & Login147# Authentication & Login


132# Where to persist CLI login credentials: file (default) | keyring | auto150# Where to persist CLI login credentials: file (default) | keyring | auto

133cli_auth_credentials_store = "file"151cli_auth_credentials_store = "file"

134 152 

135# Base URL for ChatGPT auth flow (not OpenAI API). Default:153# Base URL for ChatGPT auth flow (not OpenAI API).

136chatgpt_base_url = "https://chatgpt.com/backend-api/"154chatgpt_base_url = "https://chatgpt.com/backend-api/"

137 155 

156# Optional base URL override for the built-in OpenAI provider.

157# openai_base_url = "https://us.api.openai.com/v1"

158 

138# Restrict ChatGPT login to a specific workspace id. Default: unset.159# Restrict ChatGPT login to a specific workspace id. Default: unset.

139# forced_chatgpt_workspace_id = ""160# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"

140 161 

141# Force login mechanism when Codex would normally auto-select. Default: unset.162# Force login mechanism when Codex would normally auto-select. Default: unset.

142# Allowed values: chatgpt | api163# Allowed values: chatgpt | api


201# If you use --yolo or another full access sandbox setting, web search defaults to live.220# If you use --yolo or another full access sandbox setting, web search defaults to live.

202web_search = "cached"221web_search = "cached"

203 222 

204################################################################################

205# Profiles (named presets)

206################################################################################

207 

208# Active profile name. When unset, no profile is applied.223# Active profile name. When unset, no profile is applied.

209# profile = "default"224# profile = "default"

210 225 

226# Suppress the warning shown when under-development feature flags are enabled.

227# suppress_unstable_features_warning = true

228 

211################################################################################229################################################################################

212# Agents (multi-agent roles and limits)230# Agents (multi-agent roles and limits)

213################################################################################231################################################################################

214 232 

215# [agents]233[agents]

216# Maximum concurrently open agent threads. Default: 6234# Maximum concurrently open agent threads. Default: 6

217# max_threads = 6235# max_threads = 6

218# Maximum nested spawn depth. Root session starts at depth 0. Default: 1236# Maximum nested spawn depth. Root session starts at depth 0. Default: 1


221# job_max_runtime_seconds = 1800239# job_max_runtime_seconds = 1800

222 240 

223# [agents.reviewer]241# [agents.reviewer]

224# description = "Find security, correctness, and test risks in code."242# description = "Find correctness, security, and test risks in code."

225# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it243# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it

244# nickname_candidates = ["Athena", "Ada"]

226 245 

227################################################################################246################################################################################

228# Skills (per-skill overrides)247# Skills (per-skill overrides)


233# path = "/path/to/skill/SKILL.md"252# path = "/path/to/skill/SKILL.md"

234# enabled = false253# enabled = false

235 254 

236################################################################################

237# Experimental toggles (legacy; prefer [features])

238################################################################################

239 

240experimental_use_unified_exec_tool = false

241 

242# Include apply_patch via freeform editing path (affects default tool set). Default: false

243experimental_use_freeform_apply_patch = false

244 

245################################################################################255################################################################################

246# Sandbox settings (tables)256# Sandbox settings (tables)

247################################################################################257################################################################################


264[shell_environment_policy]274[shell_environment_policy]

265# inherit: all (default) | core | none275# inherit: all (default) | core | none

266inherit = "all"276inherit = "all"

267# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: true277# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: false

268ignore_default_excludes = true278ignore_default_excludes = false

269# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []279# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []

270exclude = []280exclude = []

271# Explicit key/value overrides (always win). Default: {}281# Explicit key/value overrides (always win). Default: {}


275# Experimental: run via user shell profile. Default: false285# Experimental: run via user shell profile. Default: false

276experimental_use_profile = false286experimental_use_profile = false

277 287 

288################################################################################

289# Managed network proxy settings

290################################################################################

291 

292# Set `default_permissions = "workspace"` before enabling this profile.

293# [permissions.workspace.network]

294# enabled = true

295# proxy_url = "http://127.0.0.1:43128"

296# admin_url = "http://127.0.0.1:43129"

297# enable_socks5 = false

298# socks_url = "http://127.0.0.1:43130"

299# enable_socks5_udp = false

300# allow_upstream_proxy = false

301# dangerously_allow_non_loopback_proxy = false

302# dangerously_allow_non_loopback_admin = false

303# dangerously_allow_all_unix_sockets = false

304# mode = "limited" # limited | full

305# allow_local_binding = false

306#

307# [permissions.workspace.network.domains]

308# "api.openai.com" = "allow"

309# "example.com" = "deny"

310#

311# [permissions.workspace.network.unix_sockets]

312# "/var/run/docker.sock" = "allow"

313 

278################################################################################314################################################################################

279# History (table)315# History (table)

280################################################################################316################################################################################


283# save-all (default) | none319# save-all (default) | none

284persistence = "save-all"320persistence = "save-all"

285# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880321# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880

286# max_bytes = 0322# max_bytes = 5242880

287 323 

288################################################################################324################################################################################

289# UI, Notifications, and Misc (tables)325# UI, Notifications, and Misc (tables)


297# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"333# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"

298# notification_method = "auto"334# notification_method = "auto"

299 335 

336# When notifications fire: unfocused (default) | always

337# notification_condition = "unfocused"

338 

300# Enables welcome/status/spinner animations. Default: true339# Enables welcome/status/spinner animations. Default: true

301animations = true340animations = true

302 341 


311# Set to [] to hide the footer.350# Set to [] to hide the footer.

312# status_line = ["model", "context-remaining", "git-branch"]351# status_line = ["model", "context-remaining", "git-branch"]

313 352 

353# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:

354# ["spinner", "project"]. Set to [] to clear the title.

355# Available IDs include app-name, project, spinner, status, thread, git-branch, model,

356# and task-progress.

357# terminal_title = ["spinner", "project"]

358 

314# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.359# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.

315# You can also add custom .tmTheme files under $CODEX_HOME/themes.360# You can also add custom .tmTheme files under $CODEX_HOME/themes.

316# theme = "catppuccin-mocha"361# theme = "catppuccin-mocha"

317 362 

363# Custom key bindings. Context-specific bindings override [tui.keymap.global].

364# Use [] to unbind an action.

365# [tui.keymap.global]

366# open_transcript = "ctrl-t"

367# open_external_editor = []

368#

369# [tui.keymap.composer]

370# submit = ["enter", "ctrl-m"]

371 

372# Internal tooltip state keyed by model slug. Usually managed by Codex.

373# [tui.model_availability_nux]

374# "gpt-5.4" = 1

375 

376# Enable or disable analytics for this machine. When unset, Codex uses its default behavior.

377[analytics]

378enabled = true

379 

318# Control whether users can submit feedback from `/feedback`. Default: true380# Control whether users can submit feedback from `/feedback`. Default: true

319[feedback]381[feedback]

320enabled = true382enabled = true


326# hide_rate_limit_model_nudge = true388# hide_rate_limit_model_nudge = true

327# hide_gpt5_1_migration_prompt = true389# hide_gpt5_1_migration_prompt = true

328# "hide_gpt-5.1-codex-max_migration_prompt" = true390# "hide_gpt-5.1-codex-max_migration_prompt" = true

329# model_migrations = { "gpt-4.1" = "gpt-5.1" }391# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }

330 

331# Suppress the warning shown when under-development feature flags are enabled.

332# suppress_unstable_features_warning = true

333 392 

334################################################################################393################################################################################

335# Centralized Feature Flags (preferred)394# Centralized Feature Flags (preferred)


339# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.398# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.

340# shell_tool = true399# shell_tool = true

341# apps = false400# apps = false

342# apps_mcp_gateway = false401# codex_hooks = false

343# web_search_cached = false402# unified_exec = true

344# web_search_request = false403# shell_snapshot = true

345# unified_exec = false404# multi_agent = true

346# shell_snapshot = false

347# apply_patch_freeform = false

348# multi_agent = false

349# search_tool = false

350# personality = true405# personality = true

351# request_rule = true406# fast_mode = true

352# collaboration_modes = true407# enable_request_compression = true

353# use_linux_sandbox_bwrap = false408# skill_mcp_dependency_install = true

354# remote_models = false409# prevent_idle_sleep = false

355# runtime_metrics = false410 

356# powershell_utf8 = true411################################################################################

357# child_agents_md = false412# Memories (table)

413################################################################################

414 

415# Enable memories with [features].memories, then tune memory behavior here.

416# [memories]

417# generate_memories = true

418# use_memories = true

419# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search

420 

421################################################################################

422# Lifecycle hooks can be configured here inline or in a sibling hooks.json.

423################################################################################

424 

425# [hooks]

426# [[hooks.PreToolUse]]

427# matcher = "^Bash$"

428#

429# [[hooks.PreToolUse.hooks]]

430# type = "command"

431# command = 'python3 "/absolute/path/to/pre_tool_use_policy.py"'

432# timeout = 30

433# statusMessage = "Checking Bash command"

358 434 

359################################################################################435################################################################################

360# Define MCP servers under this table. Leave empty to disable.436# Define MCP servers under this table. Leave empty to disable.


369# command = "docs-server" # required445# command = "docs-server" # required

370# args = ["--port", "4000"] # optional446# args = ["--port", "4000"] # optional

371# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is447# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is

372# env_vars = ["ANOTHER_SECRET"] # optional: forward these from the parent env448# env_vars = ["ANOTHER_SECRET"] # optional: forward local parent env vars

449# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]

373# cwd = "/path/to/server" # optional working directory override450# cwd = "/path/to/server" # optional working directory override

451# experimental_environment = "remote" # experimental: run stdio via a remote executor

374# startup_timeout_sec = 10.0 # optional; default 10.0 seconds452# startup_timeout_sec = 10.0 # optional; default 10.0 seconds

375# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)453# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)

376# tool_timeout_sec = 60.0 # optional; default 60.0 seconds454# tool_timeout_sec = 60.0 # optional; default 60.0 seconds

377# enabled_tools = ["search", "summarize"] # optional allow-list455# enabled_tools = ["search", "summarize"] # optional allow-list

378# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)456# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)

457# scopes = ["read:docs"] # optional OAuth scopes

458# oauth_resource = "https://docs.example.com/" # optional OAuth resource

379 459 

380# --- Example: Streamable HTTP transport ---460# --- Example: Streamable HTTP transport ---

381# [mcp_servers.github]461# [mcp_servers.github]


388# startup_timeout_sec = 10.0 # optional468# startup_timeout_sec = 10.0 # optional

389# tool_timeout_sec = 60.0 # optional469# tool_timeout_sec = 60.0 # optional

390# enabled_tools = ["list_issues"] # optional allow-list470# enabled_tools = ["list_issues"] # optional allow-list

471# disabled_tools = ["delete_issue"] # optional deny-list

472# scopes = ["repo"] # optional OAuth scopes

391 473 

392################################################################################474################################################################################

393# Model Providers475# Model Providers

394################################################################################476################################################################################

395 477 

396# Built-ins include:478# Built-ins include:

397# - openai (Responses API; requires login or OPENAI_API_KEY via auth flow)479# - openai

398# - oss (Chat Completions API; defaults to http://localhost:11434/v1)480# - ollama

481# - lmstudio

482# - amazon-bedrock

483# These IDs are reserved. Use a different ID for custom providers.

399 484 

400[model_providers]485[model_providers]

401 486 

487# --- Example: built-in Amazon Bedrock provider options ---

488# model_provider = "amazon-bedrock"

489# model = "<bedrock-model-id>"

490# [model_providers.amazon-bedrock.aws]

491# profile = "default"

492# region = "eu-central-1"

493 

# --- Example: OpenAI data residency with explicit base URL or headers ---
# [model_providers.openaidr]
# name = "OpenAI Data Residency"
# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
# wire_api = "responses" # only supported value
# # requires_openai_auth = true # use only for providers backed by OpenAI auth
# # request_max_retries = 4 # default 4; max 100
# # stream_max_retries = 5 # default 5; max 100
# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
# # supports_websockets = true # optional
# # experimental_bearer_token = "sk-example" # optional dev-only direct bearer token
# # http_headers = { "X-Example" = "value" }
# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }

# --- Example: Azure/OpenAI-compatible provider ---
# [model_providers.azure]
# name = "Azure"
# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
# wire_api = "responses"
# query_params = { api-version = "2025-04-01-preview" }
# env_key = "AZURE_OPENAI_API_KEY"
# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
# # supports_websockets = false

# --- Example: command-backed bearer token auth ---
# [model_providers.proxy]
# name = "OpenAI using LLM proxy"
# base_url = "https://proxy.example.com/v1"
# wire_api = "responses"
#
# [model_providers.proxy.auth]
# command = "/usr/local/bin/fetch-codex-token"
# args = ["--audience", "codex"]
# timeout_ms = 5000
# refresh_interval_ms = 300000

# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
# name = "Ollama"
# base_url = "http://localhost:11434/v1"
# wire_api = "responses"

################################################################################
# Profiles (named presets)
################################################################################

[profiles]

# [profiles.default]
# model = "gpt-5.2-codex"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"
# oss_provider = "ollama"
# model_reasoning_effort = "medium"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# personality = "friendly" # or "pragmatic" or "none"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
# model_catalog_json = "./models.json"
# experimental_compact_prompt_file = "./compact_prompt.txt"
# include_apply_patch_tool = false
# experimental_use_unified_exec_tool = false
# experimental_use_freeform_apply_patch = false
# tools.web_search = false # deprecated legacy alias; prefer top-level `web_search`
# features = { unified_exec = false }

################################################################################
# Apps / Connectors
# enabled = false
# approval_mode = "approve"

# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
# [tool_suggest]
# discoverables = [
# { type = "connector", id = "gmail" },
# { type = "plugin", id = "figma@openai-curated" },
# ]
# disabled_tools = [
# { type = "plugin", id = "slack@openai-curated" },
# { type = "connector", id = "connector_googlecalendar" },
# ]

################################################################################
# Profiles (named presets)
################################################################################

[profiles]

# [profiles.default]
# model = "gpt-5.4"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"
# service_tier = "flex"
# oss_provider = "ollama"
# model_reasoning_effort = "medium"
# plan_mode_reasoning_effort = "high"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# personality = "pragmatic" # or "friendly" or "none"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
# model_catalog_json = "./models.json"
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# experimental_compact_prompt_file = "./compact_prompt.txt"
# tools_view_image = true
# features = { unified_exec = false }

################################################################################
# Projects (trust levels)
################################################################################

[projects]
# Mark specific worktrees as trusted or untrusted.
# [projects."/absolute/path/to/project"]
# trust_level = "trusted" # or "untrusted"

################################################################################
# Tools
################################################################################

[tools]
# view_image = true

################################################################################
# OpenTelemetry (OTEL) - disabled by default
################################################################################

exporter = "none"
# Trace exporter: none (default) | otlp-http | otlp-grpc
trace_exporter = "none"
# Metrics exporter: none | statsig | otlp-http | otlp-grpc
metrics_exporter = "statsig"

# Example OTLP/HTTP exporter configuration
# [otel.exporter."otlp-http"]
# [otel.exporter."otlp-http".headers]
# "x-otlp-api-key" = "${OTLP_TOKEN}"

# Example OTLP/gRPC exporter configuration
# [otel.exporter."otlp-grpc"]
# endpoint = "https://otel.example.com:4317"
# headers = { "x-otlp-meta" = "abc123" }

# Example OTLP exporter with mutual TLS
# [otel.exporter."otlp-http"]
# endpoint = "https://otel.example.com/v1/logs"
# protocol = "binary"

# [otel.exporter."otlp-http".headers]
# "x-otlp-api-key" = "${OTLP_TOKEN}"
# [otel.exporter."otlp-http".tls]
# ca-certificate = "certs/otel-ca.pem"
# client-certificate = "/etc/codex/certs/client.pem"
# client-private-key = "/etc/codex/certs/client-key.pem"

# Example OTLP/gRPC trace exporter configuration
# [otel.trace_exporter."otlp-grpc"]
# endpoint = "https://otel.example.com:4317"
# headers = { "x-otlp-meta" = "abc123" }

################################################################################
# Windows
################################################################################

[windows]
# Native Windows sandbox mode (Windows only): unelevated | elevated
sandbox = "unelevated"
```