# config-sample.md
# - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
# - { granular = { ... } }: allow or auto-reject selected prompt categories
approval_policy = "on-request"
# Who reviews eligible approval prompts: user (default) | guardian_subagent
# approvals_reviewer = "user"

# Example granular policy:
# approval_policy = { granular = {
#   sandbox_approval = true,
#   rules = true,
#   mcp_elicitations = true,
#   request_permissions = false,
#   skill_approval = false
# } }

# Allow login-shell semantics for shell-based tools when they request `login = true`.
# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
# - workspace-write
# - danger-full-access (no sandbox; extremely risky)
sandbox_mode = "read-only"
# Named permissions profile to apply by default. Required before using [permissions.<name>].
# default_permissions = "workspace"

################################################################################
# Authentication & Login
# Base URL for ChatGPT auth flow (not OpenAI API).
chatgpt_base_url = "https://chatgpt.com/backend-api/"

# Optional base URL override for the built-in OpenAI provider.
# openai_base_url = "https://us.api.openai.com/v1"

# Restrict ChatGPT login to a specific workspace id. Default: unset.
# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"

# Managed network proxy settings
################################################################################

# Set `default_permissions = "workspace"` before enabling this profile.
# [permissions.workspace.network]
# enabled = true
# proxy_url = "http://127.0.0.1:43128"
# admin_url = "http://127.0.0.1:43129"
# dangerously_allow_non_loopback_admin = false
# dangerously_allow_all_unix_sockets = false
# mode = "limited" # limited | full
# allow_local_binding = false
#
# [permissions.workspace.network.domains]
# "api.openai.com" = "allow"
# "example.com" = "deny"
#
# [permissions.workspace.network.unix_sockets]
# "/var/run/docker.sock" = "allow"

################################################################################
# History (table)
# Set to [] to hide the footer.
# status_line = ["model", "context-remaining", "git-branch"]

# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
# ["spinner", "project"]. Set to [] to clear the title.
# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
# and task-progress.
# terminal_title = ["spinner", "project"]

# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
# You can also add custom .tmTheme files under $CODEX_HOME/themes.
# theme = "catppuccin-mocha"
# hide_rate_limit_model_nudge = true
# hide_gpt5_1_migration_prompt = true
# "hide_gpt-5.1-codex-max_migration_prompt" = true
# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }

################################################################################
# Centralized Feature Flags (preferred)
# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
# shell_tool = true
# apps = false
# codex_hooks = false
# unified_exec = true
# shell_snapshot = true
# multi_agent = true
# personality = true
# fast_mode = true
# guardian_approval = false
# enable_request_compression = true
# skill_mcp_dependency_install = true
# prevent_idle_sleep = false

################################################################################
# Define MCP servers under this table. Leave empty to disable.
# - openai
# - ollama
# - lmstudio
# These IDs are reserved. Use a different ID for custom providers.

[model_providers]

# name = "OpenAI Data Residency"
# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
# wire_api = "responses" # only supported value
# # requires_openai_auth = true # use only for providers backed by OpenAI auth
# # request_max_retries = 4 # default 4; max 100
# # stream_max_retries = 5 # default 5; max 100
# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
# # supports_websockets = false

# --- Example: command-backed bearer token auth ---
# [model_providers.proxy]
# name = "OpenAI using LLM proxy"
# base_url = "https://proxy.example.com/v1"
# wire_api = "responses"
#
# [model_providers.proxy.auth]
# command = "/usr/local/bin/fetch-codex-token"
# args = ["--audience", "codex"]
# timeout_ms = 5000
# refresh_interval_ms = 300000

# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
# name = "Ollama"
# base_url = "http://localhost:11434/v1"
# wire_api = "responses"
# enabled = false
# approval_mode = "approve"

# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
# [tool_suggest]
# discoverables = [
#   { type = "connector", id = "gmail" },
#   { type = "plugin", id = "figma@openai-curated" },
# ]

################################################################################
# Profiles (named presets)
################################################################################