Update concourse completion to ver 7.14.3
parent d59350592b
commit 299d6cc0c3

src/_concourse: 919 changes

@@ -15,10 +15,24 @@
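# Note on the pattern used throughout this file: the `(( $+functions[...] )) ||`
# prefix defines a helper function only when no function of that name exists yet,
# so re-sourcing the completion file never clobbers an already-loaded definition.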
(( $+functions[_concourse_server] )) ||
_concourse_server() {

local context state state_descr line ret=1
typeset -A opt_args

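# This Postgres option list is defined once and spliced into the subcommand
# completions below via $concourse_postgres_configurations[@] (see
# _concourse_migrate and _concourse_quickstart).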
local -a concourse_postgres_configurations=(
'--postgres-host=[the host to connect to]: :_hosts'
'--postgres-port=[the port to connect to]: :_concourse_ports'
'--postgres-socket=[path to a UNIX domain socket to connect to]: :_files'
'--postgres-user=[the user to sign in as]: :_users'
"--postgres-password=[the user's password]:password"
'--postgres-sslmode=[whether or not to use SSL(default: disable)]:SSL mode:(disable require verify-ca verify-full)'
'--postgres-sslnegotiation=[how SSL encryption is negotiated with the server(default: postgres)]: :(postgres direct)'
'--postgres-ca-cert=[CA cert file location, to verify when connecting with SSL]: :_files'
'--postgres-client-cert=[client cert file location]: :_files'
'--postgres-client-key=[client key file location]: :_files'
'--postgres-connect-timeout=[dialing timeout]: :_concourse_durations'
'--postgres-database=[the name of the database to use(default: atc)]:database name'
)

_arguments -C \
'(- : *)'{-h,--help}'[display help information]' \
'(- : *)'{-v,--version}'[print the version of Concourse and exit]' \
|
@@ -29,15 +43,20 @@ _concourse_server() {
case $state in
(command)
_concourse_commands && ret=0
;;
;;
(arguments)
curcontext=${curcontext%:*:*}:concourse-$words[1]:
if (( $+functions[_concourse_${words[1]}_args] )); then
_concourse_${words[1]}_args && ret=0

if [[ $words[1] == "quickstart" || $words[1] == "web" ]]; then
_concourse_quickstart_or_web "${words[1]}" && ret=0
else
_message "unknown command ${words[1]}" && ret=1
if (( $+functions[_concourse_${words[1]}] )); then
_concourse_${words[1]} && ret=0
else
_message "unknown command ${words[1]}" && ret=1
fi
fi
;;
;;
esac

return ret
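# State dispatch: the `command` state completes the top-level subcommands;
# in the `arguments` state, `quickstart` and `web` are routed to the shared
# _concourse_quickstart_or_web helper (they take the combined web/worker flags),
# while any other subcommand falls through to its own _concourse_<command> helper.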
@@ -57,18 +76,442 @@ _concourse_commands() {
|
|||
_describe -t commands commands commands
|
||||
}
|
||||
|
||||
(( $+functions[_concourse_generate-key_args] )) ||
|
||||
_concourse_generate-key_args() {
|
||||
_arguments -C \
|
||||
(( $+functions[_concourse_quickstart_or_web] )) ||
|
||||
_concourse_quickstart_or_web() {
|
||||
local command="$1"
|
||||
local ret=1
|
||||
|
||||
# define common variables
|
||||
|
||||
local -a concourse_web_configurations=(
|
||||
'--peer-address=[network address of this web node, reachable by other web nodes]: :_concourse_host_colon_ports'
|
||||
'--log-level=[minimum level of logs to see]:level:_concourse_log_levels'
|
||||
'--bind-ip=[IP address on which to listen for web traffic]: :_concourse_ip_addresses'
|
||||
'--bind-port=[port on which to listen for HTTP traffic(default: 8000)]: :_concourse_ports'
|
||||
'--tls-bind-port=[port on which to listen for HTTPS traffic]: :_concourse_ports'
|
||||
'--tls-cert=[file containing an SSL certificate]: :_files'
|
||||
'--tls-key=[file containing an RSA private key, used to encrypt HTTPS traffic]: :_files'
|
||||
'--tls-ca-cert=[file containing the client CA certificate, enables mTLS]: :_files'
|
||||
'--external-url=[URL used to reach any ATC from the outside world]: :_urls'
|
||||
'--concurrent-request-limit=[limit the number of concurrent requests to an API endpoint]:limit'
|
||||
'--api-max-conns=[maximum number of open connections for the api connection pool(default: 10)]:limit'
|
||||
'--backend-max-conns=[maximum number of open connections for the backend connection pool(default: 50)]:limit'
|
||||
'--encryption-key=[a 16 or 32 length key used to encrypt sensitive information before storing it in the database]:encryption key'
|
||||
'--old-encryption-key=[encryption key previously used for encrypting sensitive information]:encryption key'
|
||||
'--debug-bind-ip=[IP address on which to listen for the pprof debugger endpoints]: :_concourse_ip_addresses'
|
||||
'--debug-bind-port=[port on which to listen for the pprof debugger endpoints]: :_concourse_ports'
|
||||
'--intercept-idle-timeout=[length of time for an intercepted session to be idle before terminating]: :_concourse_durations'
|
||||
'--component-runner-interval=[interval on which runners are kicked off for builds, locks, scans and checks]:interval:_concourse_durations'
|
||||
'--lidar-scanner-interval=[interval on which the resource scanner will run to see if new checks need to be scheduled]:interval:_concourse_durations'
|
||||
'--global-resource-check-timeout=[time limit on checking for new versions of resources]: :_concourse_durations'
|
||||
'--resource-checking-interval=[interval on which to check for new versions of resources]: :_concourse_durations'
|
||||
'--resource-type-checking-interval=[interval on which to check for new versions of resource types]: :_concourse_durations'
|
||||
'--resource-with-webhook-checking-interval=[interval on which to check for new versions of resources that have a webhook defined]:interval:_concourse_durations'
|
||||
'--max-checks-per-second=[maximum number of checks that can be started per second]:number'
|
||||
'--pause-pipelines-after=[number of days after which a pipeline will be automatically paused when there are no jobs]:days'
|
||||
'--baggageclaim-response-header-timeout=[how long to wait for Baggageclaim to send the response header]: :_concourse_durations'
|
||||
'--streaming-artifacts-compression=[compression algorithm for internal streaming(default: gzip)]:alg:(gzip zstd raw)'
|
||||
'--streaming-size-limitation=[internal volume streaming size limitation in MB]:size'
|
||||
'--garden-request-timeout=[how long to wait for requests to Garden to complete(default: 5m)]: :_concourse_durations'
|
||||
'--cli-artifacts-dir=[directory containing downloadable CLI binaries]: :_files -/'
|
||||
'--web-public-dir=[Web public/ directory to serve live for local development]:dir:_files -/'
|
||||
'--log-db-queries[log database queries]'
|
||||
'--log-cluster-name[log cluster name]'
|
||||
'--build-tracker-interval=[interval on which to run build tracking]: :_concourse_durations'
|
||||
'--default-build-logs-to-retain=[default build logs to retain, 0 means all]:number'
|
||||
'--max-build-logs-to-retain=[maximum build logs to retain, 0 means not specified]:number'
|
||||
'--default-days-to-retain-build-logs=[default days to retain build logs. 0 means unlimited]:number'
|
||||
'--max-days-to-retain-build-logs=[maximum days to retain build logs, 0 means not specified]:number'
|
||||
'--job-scheduling-max-in-flight=[maximum number of jobs to be scheduling at the same time(default: 32)]:number'
|
||||
'--default-task-cpu-limit=[default max number of cpu shares per task, 0 means unlimited]:number'
|
||||
'--default-task-memory-limit=[default maximum memory per task, 0 means unlimited]:number'
|
||||
'--enable-build-auditing[enable auditing for all api requests connected to builds]'
|
||||
'--enable-container-auditing[enable auditing for all api requests connected to containers]'
|
||||
'--enable-job-auditing[enable auditing for all api requests connected to jobs]'
|
||||
'--enable-pipeline-auditing[enable auditing for all api requests connected to pipelines]'
|
||||
'--enable-resource-auditing[enable auditing for all api requests connected to resources]'
|
||||
'--enable-system-auditing[enable auditing for all api requests connected to system transactions]'
|
||||
'--enable-team-auditing[enable auditing for all api requests connected to teams]'
|
||||
'--enable-worker-auditing[enable auditing for all api requests connected to workers]'
|
||||
'--enable-volume-auditing[enable auditing for all api requests connected to volumes]'
|
||||
'--config-rbac=[customize RBAC role-action mapping]:mapping'
|
||||
'--system-claim-key=[token claim key to use when matching system-claim-values(default: aud)]:key'
|
||||
'--system-claim-value=[configure which token requests should be considered "system" requests(default: concourse-worker)]:value'
|
||||
'--base-resource-type-defaults=[base resource type defaults]:type'
|
||||
'--p2p-volume-streaming-timeout=[timeout of p2p volume streaming(default: 15m)]: :_concourse_durations'
|
||||
'--display-user-id-per-connector=[define how to display user ID for each authentication connector]:how'
|
||||
'--default-get-timeout=[default timeout of get steps]: :_concourse_durations'
|
||||
'--default-put-timeout=[default timeout of put steps]: :_concourse_durations'
|
||||
'--default-task-timeout=[default timeout of task steps]: :_concourse_durations'
|
||||
'--num-goroutine-threshold=[when the number of goroutines reaches this threshold, slow down the current ATC]:num'
|
||||
'--db-notification-bus-queue-size=[DB notification bus queue size(default: 10000)]:size' \
|
||||
)
|
||||
|
||||
local -a concourse_credential_manager_configurations=(
|
||||
# Credential Management
|
||||
'--secret-retry-attempts=[the number of attempts secret will be retried to be fetched, in case a retriable error happens]:number'
|
||||
'--secret-retry-interval=[the interval between secret retry retrieval attempts]: :_concourse_durations'
|
||||
'--secret-cache-enabled[enable in-memory cache for secrets]'
|
||||
'--secret-cache-duration=[secret values will be cached for not longer than this duration]: :_concourse_durations'
|
||||
'--secret-cache-duration-notfound=[secret not found responses will be cached for this duration]: :_concourse_durations'
|
||||
'--secret-cache-purge-interval=[if the cache is enabled, expired items will be removed on this interval]: :_concourse_durations'
|
||||
# Vault Credential Management
|
||||
'--vault-url=[vault server address used to access secrets]: :_urls'
|
||||
'--vault-path-prefix=[path under which to namespace credential lookup]:prefix'
|
||||
'--vault-lookup-templates=[path templates for credential lookup]: :_files'
|
||||
'--vault-shared-path=[path under which to lookup shared credentials]:path'
|
||||
'--vault-namespace=[Vault namespace to use for authentication and secret lookup]:namespace'
|
||||
'--vault-login-timeout=[timeout value for Vault login(default: 60s)]: :_concourse_durations'
|
||||
'--vault-query-timeout=[timeout value for Vault query(default: 60s)]: :_concourse_durations'
|
||||
'--vault-disable-srv-lookup[do not look up the Vault host via a DNS SRV lookup]'
|
||||
'--vault-ca-cert=[path to a PEM-encoded CA cert file to use to verify the vault server SSL cert]: :_files'
|
||||
'--vault-ca-path=[path to a directory of PEM-encoded CA cert files to verify the vault server SSL cert]: :_files -/'
|
||||
'--vault-client-cert=[path to the client certificate for Vault authorization]: :_files'
|
||||
'--vault-client-key=[path to the client private key for Vault authorization]: :_files'
|
||||
'--vault-server-name=[if set, is used to set the SNI host when connecting via TLS]:server name'
|
||||
'--vault-insecure-skip-verify[enable insecure SSL verification]'
|
||||
'--vault-client-token=[client token for accessing secrets within the Vault server]:client token'
|
||||
'--vault-client-token-path=[absolute path to a file containing the Vault client token]: :_files'
|
||||
'--vault-auth-backend=[auth backend to use for logging in to Vault]:auth backend'
|
||||
'--vault-auth-backend-max-ttl=[time after which to force a re-login]: :_concourse_durations'
|
||||
'--vault-retry-max=[the maximum time between retries when logging in or re-authing a secret]: :_concourse_durations'
|
||||
'--vault-retry-initial=[the initial time between retries when logging in or re-authing a secret]: :_concourse_durations'
|
||||
'*--vault-auth-param=[parameter to pass when logging in via the backend]: :_concourse_name_colon_values'
|
||||
# Conjur Credential Management
|
||||
'--conjur-appliance-url=[URL of the conjur instance]: :_urls'
|
||||
'--conjur-account=[Conjur Account]:account'
|
||||
'--conjur-cert-file=[cert file used if conjur instance is using a self signed cert]: :_files'
|
||||
'--conjur-authn-login=[host username for conjur authn login]:host'
|
||||
'--conjur-authn-api-key=[Api key related to the host for conjur authn]:api_key'
|
||||
'--conjur-authn-token-file=[token file used if conjur instance is running in k8s or iam]: :_files'
|
||||
'--conjur-pipeline-secret-template=[Conjur secret identifier template used for pipeline specific parameter]:template'
|
||||
'--conjur-team-secret-template=[Conjur secret identifier template used for team specific parameter]:template'
|
||||
'--conjur-secret-template=[Conjur secret identifier template used for full path conjur secrets]:template'
|
||||
# CredHub Credential Management
|
||||
'--credhub-url=[CredHub server address used to access secrets]: :_urls'
|
||||
'--credhub-path-prefix=[path under which to namespace credential lookup]:path'
|
||||
'--credhub-ca-cert=[path to PEM-encoded CA cert files to use to verify the CredHub server SSL cert]: :_files'
|
||||
'--credhub-client-cert=[path to the client certificate for mutual TLS authorization]: :_files'
|
||||
'--credhub-client-key=[path to the client private key for mutual TLS authorization]: :_files'
|
||||
'--credhub-insecure-skip-verify[enable insecure SSL verification]'
|
||||
'--credhub-client-id=[client ID for CredHub authorization]:client ID'
|
||||
'--credhub-client-secret=[client secret for CredHub authorization]:client secret'
|
||||
# Dummy Credential Management
|
||||
'--dummy-creds-var=[a YAML value to expose via credential management]:key_val'
|
||||
# Kubernetes Credential Management
|
||||
'--kubernetes-in-cluster[enable the Kubernetes in-cluster client]'
|
||||
'--kubernetes-config-path=[path to Kubernetes config]: :_files'
|
||||
'--kubernetes-namespace-prefix=[prefix to use for Kubernetes namespace]:prefix'
|
||||
# AWS SecretsManager Credential Management
|
||||
'--aws-secretsmanager-access-key=[AWS Access key ID]:access key'
|
||||
'--aws-secretsmanager-secret-key=[AWS Secret Access Key]:secret key'
|
||||
'--aws-secretsmanager-session-token=[AWS Session Token]:session token'
|
||||
'--aws-secretsmanager-region=[AWS region to send requests to]:region'
|
||||
'--aws-secretsmanager-pipeline-secret-template=[AWS Secrets Manager secret identifier template used for pipeline specific parameter]:template'
|
||||
'--aws-secretsmanager-team-secret-template=[AWS Secrets Manager secret identifier template used for team specific parameter]:template'
|
||||
'--aws-secretsmanager-shared-secret-template=[AWS Secrets Manager secret identifier template used for shared parameter]:template'
|
||||
# AWS SSM Credential Management
|
||||
'--aws-ssm-access-key=[AWS Access key ID]:access key'
|
||||
'--aws-ssm-secret-key=[AWS Secret Access Key]:secret key'
|
||||
'--aws-ssm-session-token=[AWS Session Token]:session token'
|
||||
'--aws-ssm-region=[AWS region to send requests to]:region'
|
||||
'--aws-ssm-pipeline-secret-template=[AWS SSM parameter name template used for pipeline specific parameter]:template'
|
||||
'--aws-ssm-team-secret-template=[AWS SSM parameter name template used for team specific parameter]:template'
|
||||
'--aws-ssm-shared-path=[AWS SSM parameter path used for shared parameters]: :_files'
|
||||
)
|
||||
|
||||
local -a concourse_placement_strategies=(
|
||||
volume-locality random fewest-build-containers limit-active-tasks limit-active-containers
|
||||
limit-active-volumes
|
||||
)
|
||||
local -a concourse_second_placement_strategies=(
|
||||
random fewest-build-containers limit-active-tasks limit-active-containers limit-active-volumes
|
||||
)
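# The two strategy lists above supply the candidate values for the
# --container-placement-strategy, --no-input-container-placement-strategy and
# --check-container-placement-strategy specs defined just below.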
|
||||
|
||||
local -a concourse_container_placement_strategy_configurations=(
|
||||
'--container-placement-strategy=[method by which a worker is selected during container placement]: :(($concourse_placement_strategies))'
|
||||
'--no-input-container-placement-strategy=[a second container placement strategy]: :(($concourse_second_placement_strategies))'
|
||||
'--check-container-placement-strategy=[a third container placement strategy]: :(($concourse_second_placement_strategies))'
|
||||
'--max-active-tasks-per-worker=[maximum allowed number of active build tasks per worker]:tasks'
|
||||
'--max-active-containers-per-worker=[maximum allowed number of active containers per worker]:containers'
|
||||
'--max-active-volumes-per-worker=[maximum allowed number of active volumes per worker]:volumes'
|
||||
)
|
||||
|
||||
local -a concourse_metric_configurations=(
|
||||
# Metrics & Diagnostics
|
||||
'--metrics-host-name=[host string to attach to emitted metrics]: :_hosts'
|
||||
'*--metrics-attribute=[a key-value attribute to attach to emitted metrics]: :_concourse_name_colon_values'
|
||||
'--metrics-buffer-size=[size of the buffer used in emitting event metrics(default: 1000)]:size'
|
||||
'--capture-error-metrics[enable capturing of error log metrics]'
|
||||
# Metric Emitter (Datadog)
|
||||
'--datadog-agent-host=[datadog agent host to expose dogstatsd metrics]: :_hosts'
|
||||
'--datadog-agent-port=[datadog agent port to expose dogstatsd metrics]: :_concourse_ports'
|
||||
'--datadog-agent-uds-filepath=[Datadog agent unix domain socket filepath]: :_files'
|
||||
'--datadog-prefix=[prefix for all metrics to easily find them in Datadog]:prefix'
|
||||
# Metric Emitter (InfluxDB)
|
||||
'--influxdb-url=[influxDB server address to emit points to]: :_urls'
|
||||
'--influxdb-database=[influxDB database to write points to]:database name'
|
||||
'--influxdb-username=[influxDB server username]: :_users'
|
||||
'--influxdb-password=[influxDB server password]:password'
|
||||
'--influxdb-insecure-skip-verify[skip SSL verification when emitting to InfluxDB]'
|
||||
'--influxdb-batch-size=[number of points to batch together when emitting to InfluxDB(default: 5000)]:size'
|
||||
'--influxdb-batch-duration=[duration to wait before emitting a batch of points to InfluxDB(default: 300s)]: :_concourse_durations'
|
||||
# Metric Emitter (Lager)
|
||||
'--emit-to-logs[emit metrics to logs]'
|
||||
# Metric Emitter (NewRelic)
|
||||
'--newrelic-account-id=[new Relic Account ID]:account ID'
|
||||
'--newrelic-api-key=[new Relic Insights API Key]:API key'
|
||||
'--newrelic-insights-api-url=[Base Url for insights Insert API]: :_urls'
|
||||
'--newrelic-service-prefix=[an optional prefix for emitted New Relic events]:prefix'
|
||||
'--newrelic-batch-size=[number of events to batch together before emitting(default: 2000)]:size'
|
||||
'--newrelic-batch-duration=[length of time to wait between emitting(default: 60s)]: :_concourse_durations'
|
||||
'--newrelic-batch-disable-compression[disable compression of the batch before sending it]'
|
||||
# Metric Emitter (Prometheus)
|
||||
'--prometheus-bind-ip=[IP to listen on to expose Prometheus metrics]: :_concourse_ip_addresses'
|
||||
'--prometheus-bind-port=[port to listen on to expose Prometheus metrics]: :_concourse_ports'
|
||||
)
|
||||
|
||||
local -a concourse_tracing_configurations=(
|
||||
'--tracing-service-name=[service name to attach to traces as metadata(default: concourse-web)]:name'
|
||||
'--tracing-attribute=[attributes to attach to traces as metadata]:attributes'
|
||||
'--tracing-honeycomb-api-key=[honeycomb.io api key]:key'
|
||||
'--tracing-honeycomb-dataset=[honeycomb.io dataset name]:name'
|
||||
'--tracing-jaeger-endpoint=[jaeger http-based thrift collector]:endpoint'
|
||||
'--tracing-jaeger-tags=[tags to add to the components]:tags'
|
||||
'--tracing-jaeger-service=[jaeger process service name(default: web)]:name'
|
||||
"--tracing-stackdriver-projectid=[GCP's project ID]:project_id"
|
||||
'--tracing-otlp-address=[otlp address to send traces to]:address'
|
||||
'--tracing-otlp-header=[headers to attach to each tracing message]:header'
|
||||
'--tracing-otlp-use-tls[whether to use TLS or not]'
|
||||
)
|
||||
|
||||
local -a concourse_policy_check_agent_configurations=(
|
||||
# Policy Checking
|
||||
'--policy-check-filter-http-method=[API http method to go through policy check]:method'
|
||||
'--policy-check-filter-action=[actions in the list will go through policy check]:actions'
|
||||
'--policy-check-filter-action-skip=[actions in the list will not go through policy check]:actions'
|
||||
# Policy Check Agent (Open Policy Agent):
|
||||
'--opa-url=[OPA policy check endpoint]:url:_urls'
|
||||
'--opa-timeout=[OPA request timeout(default: 5s)]: :_concourse_durations'
|
||||
'--opa-result-allowed-key=[key in the OPA result indicating whether the policy check passed]:key'
|
||||
'--opa-result-should-block-key=[key in the OPA result indicating whether the current action should be blocked]:key'
|
||||
'--opa-result-messages-key=[key of the messages in the OPA result]:key'
|
||||
)
|
||||
|
||||
local -a concourse_web_server_configurations=(
|
||||
'--x-frame-options=[the value to set for X-Frame-Options]:options'
|
||||
'--content-security-policy=[value to set for Content-Security-Policy header]:value'
|
||||
'--strict-transport-security=[value to set for the Strict-Transport-Security header]:value'
|
||||
'--cluster-name=[a name for this Concourse cluster, to be displayed on the dashboard page]:name'
|
||||
'--client-id=[client ID to use for login flow(default: concourse-web)]:id'
|
||||
'--client-secret=[client secret to use for login flow]:secret'
|
||||
)
|
||||
|
||||
local -a concourse_gc_configurations=(
|
||||
'--gc-interval=[interval on which to perform garbage collection]: :_concourse_durations'
|
||||
'--gc-one-off-grace-period=[period after which one-off build containers will be garbage-collected]: :_concourse_durations'
|
||||
'--gc-missing-grace-period=[period after which to reap containers and volumes that were created but went missing from the worker]: :_concourse_durations'
|
||||
'--gc-hijack-grace-period=[period after which hijacked containers will be garbage collected]: :_concourse_durations'
|
||||
'--gc-failed-grace-period=[period after which failed containers will be garbage collected]: :_concourse_durations'
|
||||
'--gc-check-recycle-period=[period after which to reap checks that are completed]: :_concourse_durations'
|
||||
'--gc-var-source-recycle-period=[period after which to reap var_sources that are not used]: :_concourse_durations'
|
||||
)
|
||||
|
||||
local -a concourse_syslog_configurations=(
|
||||
'--syslog-hostname=[client hostname with which the build logs will be sent to the syslog server]: :_hosts'
|
||||
'--syslog-address=[remote syslog server address with port]: :_concourse_host_colon_ports'
|
||||
'--syslog-transport=[transport protocol for syslog messages]:protocol:(tcp udp tls)'
|
||||
'--syslog-drain-interval=[interval over which checking is done for new build logs to send to syslog server]: :_concourse_durations'
|
||||
'--syslog-ca-cert=[paths to PEM-encoded CA cert files to use to verify the Syslog server SSL cert]: :_files'
|
||||
)
|
||||
|
||||
local -a concourse_authentication_configurations=(
|
||||
'--cookie-secure[force sending secure flag on http cookies]'
|
||||
'--auth-duration=[length of time for which tokens are valid]: :_concourse_durations'
|
||||
'--session-signing-key=[file containing an RSA private key, used to sign auth tokens]: :_files'
|
||||
'--password-connector=[connector to use when authenticating via "fly login -u .. -p.."]: :(local ldap)'
|
||||
'*--add-local-user=[list of username:password combinations for all your local users]: :_concourse_username_colon_passwords'
|
||||
'*--add-client=[list of client_id:client_secret combinations]:pair'
|
||||
# Authentication Main Team
|
||||
'*--main-team-local-user=[list of whitelisted local concourse users]: :_users'
|
||||
{-c,--main-team-config=}'[configuration file for specifying team params]: :_concourse_config_files'
|
||||
# Authentication Main Team Bitbucket Cloud
|
||||
'*--main-team-bitbucket-cloud-user=[list of whitelisted Bitbucket Cloud users]: :_users'
|
||||
'*--main-team-bitbucket-cloud-team=[list of whitelisted Bitbucket Cloud teams]:team'
|
||||
# Authentication Main Team CloudFoundry
|
||||
'*--main-team-cf-user=[list of whitelisted CloudFoundry users]: :_users'
|
||||
'*--main-team-cf-org=[list of whitelisted CloudFoundry orgs]:org name'
|
||||
'*--main-team-cf-space=[list of whitelisted CloudFoundry spaces]:space name'
|
||||
'*--main-team-cf-space-with-any-role=[a whitelisted CloudFoundry space for users with any role]:space'
|
||||
'*--main-team-cf-space-with-developer-role=[a whitelisted CloudFoundry space for users with the developer role]:space'
|
||||
'*--main-team-cf-space-with-auditor-role=[a whitelisted CloudFoundry space for users with the auditor role]:space'
|
||||
'*--main-team-cf-space-with-manager-role=[a whitelisted CloudFoundry space for users with the manager role]:space'
|
||||
'*--main-team-cf-space-guid=[a whitelisted CloudFoundry space guid]:space_guid'
|
||||
# Authentication Main Team GitHub
|
||||
'*--main-team-github-user=[list of whitelisted GitHub users]: :_users'
|
||||
'*--main-team-github-org=[list of whitelisted GitHub orgs]:org name'
|
||||
'*--main-team-github-team=[list of whitelisted GitHub teams]:team name'
|
||||
# Authentication Main Team GitLab
|
||||
'*--main-team-gitlab-user=[list of whitelisted GitLab users]: :_users'
|
||||
'*--main-team-gitlab-group=[list of whitelisted GitLab groups]:group name'
|
||||
# Authentication Main Team LDAP
|
||||
'*--main-team-ldap-user=[list of whitelisted LDAP users]: :_users'
|
||||
'*--main-team-ldap-group=[list of whitelisted LDAP groups]:group name'
|
||||
# Authentication Main Team Microsoft
|
||||
'*--main-team-microsoft-user=[a whitelisted Microsoft user]: :_users'
|
||||
'*--main-team-microsoft-group=[a whitelisted Microsoft group]:group name'
|
||||
# Authentication Main Team OAuth2
|
||||
'*--main-team-oauth-user=[list of whitelisted OAuth2 users]: :_users'
|
||||
'*--main-team-oauth-group=[list of whitelisted OAuth2 groups]:group name'
|
||||
# Authentication Main Team OIDC
|
||||
'*--main-team-oidc-user=[list of whitelisted OIDC users]: :_users'
|
||||
'*--main-team-oidc-group=[list of whitelisted OIDC groups]:group name'
|
||||
# Authentication Main Team SAML
|
||||
'*--main-team-saml-user=[a whitelisted SAML user]: :_users'
|
||||
'*--main-team-saml-group=[a whitelisted SAML group]:group name'
|
||||
# Authentication Bitbucket Cloud
|
||||
'--bitbucket-cloud-client-id=[client id]:client ID'
|
||||
'--bitbucket-cloud-client-secret=[client secret]:client secret'
|
||||
# Authentication CloudFoundry
|
||||
'--cf-client-id=[client id]:client ID'
|
||||
'--cf-client-secret=[client secret]:client secret'
|
||||
'--cf-api-url=[the base API URL of your CF deployment]: :_urls'
|
||||
'--cf-ca-cert=[CA Certificate]: :_files'
|
||||
'--cf-skip-ssl-validation[skip SSL validation]'
|
||||
# Authentication GitHub
|
||||
'--github-client-id=[client id]:client ID'
|
||||
'--github-client-secret=[client secret]:client secret'
|
||||
'--github-host=[hostname of GitHub Enterprise deployment]: :_hosts'
|
||||
'--github-ca-cert=[CA certificate of GitHub Enterprise deployment]: :_files'
|
||||
# Authentication GitLab
|
||||
'--gitlab-client-id=[client id]:client ID'
|
||||
'--gitlab-client-secret=[client secret]:client secret'
|
||||
'--gitlab-host=[hostname of Gitlab Enterprise deployment]: :_hosts'
|
||||
# Authentication LDAP
|
||||
'--ldap-display-name=[the auth provider name displayed to users on the login page]:display name'
|
||||
'--ldap-host=[the host and optional port of the LDAP server]: :_hosts'
|
||||
'--ldap-bind-dn=[bind DN for searching LDAP users and groups]:bind DN'
|
||||
'--ldap-bind-pw=[bind Password for the user specified by bind-dn]:bind password'
|
||||
'--ldap-insecure-no-ssl[required if LDAP host does not use TLS]'
|
||||
'--ldap-insecure-skip-verify[skip certificate verification]'
|
||||
'--ldap-start-tls[start on insecure port, then negotiate TLS]'
|
||||
'--ldap-ca-cert=[CA certificate]: :_files'
|
||||
'--ldap-username-prompt=[prompt when logging in through the UI]:prompt'
|
||||
'--ldap-user-search-base-dn=[baseDN to start the search from]:baseDN'
|
||||
'--ldap-user-search-filter=[optional filter to apply when searching the directory]:filter'
|
||||
'--ldap-user-search-username=[attribute to match against the inputted username]:attribute'
|
||||
"--ldap-user-search-scope=[can either be: 'sub' - search the whole sub tree or 'one' - only search one level]:scope:((sub one))"
|
||||
'--ldap-user-search-id-attr=[a mapping of attributes on the user entry to claims]:attribute mapping'
|
||||
'--ldap-user-search-email-attr=[a mapping of attributes on the user entry to claims]:attribute mapping'
|
||||
'--ldap-user-search-name-attr=[a mapping of attributes on the user entry to claims]:attribute mapping'
|
||||
'--ldap-group-search-base-dn=[baseDN to start the search from]:baseDN'
|
||||
'--ldap-group-search-filter=[optional filter to apply when searching the directory]:filter'
|
||||
"--ldap-group-search-scope=[can either be: 'sub' - search the whole sub tree or 'one' - only search one level]:scope:(sub one)"
|
||||
"--ldap-group-search-user-attr=[adds an additional requirement to the filter that an attribute in the group match the user's attribute value]:attribute"
|
||||
"--ldap-group-search-group-attr=[adds an additional requirement to the filter that an attribute in the group match the user's attribute value]:attribute"
|
||||
'--ldap-group-search-name-attr=[the attribute of the group that represents its name]:attribute'
|
||||
# Authentication Microsoft
|
||||
'--microsoft-client-id=[Microsoft client ID]:id'
|
||||
'--microsoft-client-secret=[Microsoft client secret]:secret'
|
||||
'--microsoft-tenant=[Microsoft Tenant limitation]:tenant'
|
||||
'--microsoft-groups=[allowed Active Directory Groups]:groups'
|
||||
'--microsoft-only-security-groups[only fetch security groups]'
|
||||
# Authentication OAuth2
|
||||
'--oauth-display-name=[the auth provider name displayed to users on the login page]:display name'
|
||||
'--oauth-client-id=[client id]:client ID'
|
||||
'--oauth-client-secret=[client secret]:client secret'
|
||||
'--oauth-auth-url=[Authorization URL]: :_urls'
|
||||
'--oauth-token-url=[Token URL]: :_urls'
|
||||
'--oauth-userinfo-url=[UserInfo URL]: :_urls'
|
||||
'*--oauth-scope=[any additional scopes that need to be requested during authorization]:scope'
|
||||
'--oauth-groups-key=[the groups key indicates which claim to use to map external groups to Concourse teams]:group key'
|
||||
'--oauth-user-id-key=[the user id key indicates which claim to use to map an external user id to a Concourse user id]:id key'
|
||||
'--oauth-user-name-key=[the user name key indicates which claim to use to map an external user name to a Concourse user name]:name key'
|
||||
'--oauth-ca-cert=[CA Certificate]: :_files'
|
||||
'--oauth-skip-ssl-validation[skip SSL validation]'
|
||||
# Authorization OIDC
|
||||
'--oidc-display-name=[the auth provider name displayed to users on the login page]:display name'
|
||||
'--oidc-issuer=[An OIDC issuer URL that will be used to discover provider configuration]: :_urls'
|
||||
'--oidc-client-id=[client id]:client ID'
|
||||
'--oidc-client-secret=[client secret]:client secret'
|
||||
'*--oidc-scope=[any additional scopes that need to be requested during authorization]:scope'
|
||||
'--oidc-groups-key=[the groups key indicates which claim to use to map external groups to Concourse teams]:group key'
|
||||
'--oidc-user-name-key=[the user name key indicates which claim to use to map an external user name to a Concourse user name]:user name key'
|
||||
'--oidc-ca-cert=[CA Certificate]: :_files'
|
||||
'--oidc-skip-ssl-validation[skip SSL validation]'
|
||||
'--oidc-disable-groups[disable OIDC groups claims]'
|
||||
'--oidc-skip-email-verified-validation[ignore the email_verified claim from the upstream provider]'
|
||||
'--oidc-disable-get-user-info[do not query the UserInfo endpoint for additional claims]'
|
||||
# Authorization SAML
|
||||
'--saml-display-name=[SAML auth provider name displayed to users on the login page]:name'
|
||||
'--saml-sso-url=[SAML SSO URL used for POST value]:url:_urls'
|
||||
'--saml-ca-cert=[SAML CA Certificate]: :_files'
|
||||
"--saml-entity-issuer=[SAML manually specify dex's issuer value]"
|
||||
'--saml-sso-issuer=[issuer value expected in the SAML response]:issuer'
|
||||
'--saml-username-attr=[SAML concourse user name]:name'
|
||||
'--saml-email-attr=[SAML concourse email]:name'
|
||||
'--saml-groups-attr=[SAML concourse teams]:name'
|
||||
'--saml-groups-delim=[groups are returned as string with this delimiter]:delimiter'
|
||||
'--saml-name-id-policy-format=[requested format of the NameID]:format'
|
||||
'--saml-skip-ssl-validation[SAML skip SSL validation]'
|
||||
)
|
||||
|
||||
local -a concourse_feature_flags=(
|
||||
'--enable-global-resources[enable equivalent resources across pipelines and teams to share a single version history]'
|
||||
'--enable-redact-secrets[enable redacting secrets in build logs]'
|
||||
'--enable-rerun-when-worker-disappears[automatically rerun builds when a worker disappears]'
|
||||
'--enable-across-step[enable the experimental across step to be used in jobs]'
|
||||
'--enable-pipeline-instances[enable pipeline instances]'
|
||||
'--enable-p2p-volume-streaming[enable P2P volume streaming]'
|
||||
'--enable-cache-streamed-volumes[streamed resource volumes will be cached on the destination]'
|
||||
'--enable-resource-causality[enable the resource causality page]'
|
||||
)
|
||||
|
||||
local -a concourse_tsa_configurations=(
|
||||
'--tsa-log-level=[minimum level of logs to see]: :_concourse_log_levels'
|
||||
'--tsa-bind-ip=[IP address on which to listen for SSH]: :_concourse_ip_addresses'
|
||||
'--tsa-peer-address=[network address of this web node, reachable by other web nodes]: :_urls'
|
||||
'--tsa-bind-port=[port on which to listen for SSH]: :_concourse_ports'
|
||||
'--tsa-debug-bind-ip=[IP address on which to listen for the pprof debugger endpoints]: :_concourse_ip_addresses'
|
||||
'--tsa-debug-bind-port=[port on which to listen for the pprof debugger endpoints]: :_concourse_ports'
|
||||
'--tsa-host-key=[path to private key to use for the SSH server]: :_files'
|
||||
'--tsa-authorized-keys=[path to file containing keys to authorize, in SSH authorized_keys format]: :_files'
|
||||
'--tsa-team-authorized-keys=[path to file containing keys to authorize, in SSH authorized_keys format]: :_concourse_name_colon_paths'
|
||||
'--tsa-atc-url=[ATC API endpoints to which workers will be registered]: :_urls'
|
||||
'--tsa-client-id=[client ID used to fetch a token from the auth server]:id'
|
||||
'--tsa-client-secret=[client secret used to fetch a token from the auth server]:secret'
|
||||
'--tsa-token-url=[token endpoint of the auth server]: :_urls'
|
||||
'*--tsa-scope=[scopes to request from the auth server]'
|
||||
'--tsa-heartbeat-interval=[interval on which to heartbeat workers to the ATC]: :_concourse_durations'
|
||||
'--tsa-garden-request-timeout=[how long to wait for requests to Garden to complete]: :_concourse_durations'
|
||||
'--tsa-cluster-name=[a name for this Concourse cluster]:name'
|
||||
'--tsa-log-cluster-name[log cluster name]'
|
||||
)
|
||||
|
||||
case $command in
|
||||
(quickstart)
|
||||
_concourse_quickstart && ret=0
|
||||
;;
|
||||
(web)
|
||||
_concourse_web && ret=0
|
||||
;;
|
||||
esac
|
||||
|
||||
return ret
|
||||
}
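# The configuration arrays built above (web, credential manager, placement,
# metrics, tracing, policy, GC, syslog, auth, feature flags, TSA) are consumed
# by the quickstart/web completions dispatched here; _concourse_quickstart, for
# example, splices them into its _arguments call further down.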
|
||||
|
||||
(( $+functions[_concourse_generate-key] )) ||
|
||||
_concourse_generate-key() {
|
||||
_arguments \
|
||||
'(- : *)'{-h,--help}'[display help information]' \
|
||||
'(-t --type)'{-t,--type=}'[the type of key to generate]:key type:(rsa ssh)' \
|
||||
'(-f --filename)'{-f,--filename=}'[file path where the key shall be created. When generating ssh keys, the public key will be stored in a file with the same name but with .pub appended]: :_files' \
|
||||
'(-b --bits)'{-b,--bits=}'[the number of bits in the key to create]:integer'
|
||||
}
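# For example, `concourse generate-key -t <TAB>` offers the two key types
# (rsa ssh), and `-f <TAB>` completes file paths via _files.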
|
||||
|
||||
(( $+functions[_concourse_land-worker_args] )) ||
|
||||
_concourse_land-worker_args() {
|
||||
_arguments -C \
|
||||
(( $+functions[_concourse_land-worker] )) ||
|
||||
_concourse_land-worker() {
|
||||
_arguments \
|
||||
'(- : *)'{-h,--help}'[display help information]' \
|
||||
'--name=[the name of the worker you wish to land]:worker name' \
|
||||
'*--tsa-host=[TSA host to forward the worker through]: :_concourse_host_colon_ports' \
|
||||
|
|
@@ -76,263 +519,193 @@ _concourse_land-worker_args() {
|
|||
'--tsa-worker-private-key=[file containing the private key to use when authenticating to the TSA]: :_files'
|
||||
}
|
||||
|
||||
(( $+functions[_concourse_migrate_args] )) ||
|
||||
_concourse_migrate_args() {
|
||||
_arguments -C \
|
||||
(( $+functions[_concourse_migrate] )) ||
|
||||
_concourse_migrate() {
|
||||
_arguments \
|
||||
'(- : *)'{-h,--help}'[display help information]' \
|
||||
'--encryption-key=[a 16 or 32 length key used to encrypt sensitive information before storing it in the database]:encryption key' \
|
||||
'(- : *)--current-db-version[print the current database version and exit]' \
|
||||
'(- : *)--supported-db-version[print the max supported database version and exit]' \
|
||||
'(- : *)--migrate-db-to-version=[migrate to the specified database version and exit]:database version' \
|
||||
'--encryption-key=[a 16 or 32 length key used to encrypt sensitive information before storing it in the database]:encryption key' \
|
||||
'--postgres-host=[the host to connect to]: :_hosts' \
|
||||
'--postgres-port=[the port to connect to]: :_concourse_ports' \
|
||||
'--postgres-socket=[path to a UNIX domain socket to connect to]: :_files' \
|
||||
'--postgres-user=[the user to sign in as]: :_users' \
|
||||
'--postgres-password=[the user'\''s password]:password' \
|
||||
'--postgres-sslmode=[whether or not to use SSL]:SSL mode:((disable require verify-ca verify-full))' \
|
||||
'--postgres-ca-cert=[CA cert file location, to verify when connecting with SSL]: :_files' \
|
||||
'--postgres-client-cert=[client cert file location]: :_files' \
|
||||
'--postgres-client-key=[client key file location]: :_files' \
|
||||
'--postgres-connect-timeout=[dialing timeout]:duration' \
|
||||
'--postgres-database=[the name of the database to use]:database name'
|
||||
'--migrate-db-to-version=[migrate to the specified database version and exit]:database version' \
|
||||
'--migrate-to-latest-version[migrate to the latest migration version and exit]' \
|
||||
$concourse_postgres_configurations[@] \
|
||||
}
|
||||
|
||||
(( $+functions[_concourse_retire-worker_args] )) ||
|
||||
_concourse_retire-worker_args() {
|
||||
_arguments -C \
|
||||
(( $+functions[_concourse_quickstart] )) ||
|
||||
_concourse_quickstart() {
|
||||
local -a concourse_worker_configurations=(
|
||||
'--worker-name=[name to set for the worker during registration]:name'
|
||||
'--worker-tag=[tag to set during registration]:tag'
|
||||
'--worker-team=[name of the team that this worker will be assigned to]:team'
|
||||
'--worker-http-proxy=[HTTP proxy endpoint to use for containers]:proxy:_urls'
|
||||
'--worker-https-proxy=[HTTPS proxy endpoint to use for containers]:proxy:_urls'
|
||||
'*--worker-no-proxy=[blacklist of addresses to skip the proxy when reaching]:url:_urls'
|
||||
'--worker-ephemeral[worker will be immediately removed upon stalling]'
|
||||
'--worker-certs-dir=[directory to use when creating the resource certificates volume]:dir:_files -/'
|
||||
'--worker-work-dir=[directory in which to place container data]:dir:_files -/'
|
||||
'--worker-bind-ip=[IP address on which to listen for the Garden server]:ip'
|
||||
'--worker-bind-port=[port on which to listen for the Garden server]:port'
|
||||
'--worker-debug-bind-ip=[IP address on which to listen for the pprof debugger endpoints]:ip'
|
||||
'--worker-debug-bind-port=[port on which to listen for the pprof debugger endpoints]:port'
|
||||
'--worker-healthcheck-bind-ip=[IP address on which to listen for health checking requests]:ip'
|
||||
'--worker-healthcheck-bind-port=[port on which to listen for health checking requests]:port'
|
||||
'--worker-healthcheck-timeout=[HTTP timeout for the full duration of health checking]: :_concourse_durations'
|
||||
'--worker-sweep-interval=[interval on which containers and volumes will be garbage collected from the worker]: :_concourse_durations'
|
||||
'--worker-volume-sweeper-max-in-flight=[maximum number of volumes which can be swept in parallel]:number'
|
||||
'--worker-container-sweeper-max-in-flight=[maximum number of containers which can be swept in parallel]:number'
|
||||
'--worker-rebalance-interval=[duration after which the registration should be swapped to another random SSH gateway]: :_concourse_durations'
|
||||
'--worker-connection-drain-timeout=[duration after which a worker should give up draining forwarded connections on shutdown]: :_concourse_durations'
|
||||
'--worker-external-garden-url=[API endpoint of an externally managed Garden server to use instead of running the embedded Garden server]:url:_urls'
|
||||
'--worker-resource-types=[path to directory containing resource types the worker should advertise]:dir:_files -/'
|
||||
'--worker-log-level=[minimum level of logs to see]:level:_concourse_log_levels'
|
||||
|
||||
# TSA Configuration:
|
||||
'--worker-tsa-host=[TSA host to forward the worker through]:host'
|
||||
'--worker-tsa-public-key=[file containing a public key to expect from the TSA]:file:_files'
|
||||
'--worker-tsa-worker-private-key=[file containing the private key to use when authenticating to the TSA]:file:_files'
|
||||
|
||||
# Tracing:
|
||||
'--worker-tracing-service-name=[service name to attach to traces as metadata]:name'
|
||||
'--worker-tracing-attribute=[attributes to attach to traces as metadata]:attr'
|
||||
'--worker-tracing-honeycomb-api-key=[honeycomb.io api key]:key'
|
||||
'--worker-tracing-honeycomb-dataset=[honeycomb.io dataset name]:dataset'
|
||||
'--worker-tracing-jaeger-endpoint=[jaeger http-based thrift collector]:endpoint'
|
||||
'--worker-tracing-jaeger-tags=[tags to add to the components]:tags'
|
||||
'--worker-tracing-jaeger-service=[jaeger process service name]:service'
|
||||
"--worker-tracing-stackdriver-projectid=[GCP's Project ID]:id"
|
||||
'--worker-tracing-otlp-address=[otlp address to send traces to]:address'
|
||||
'--worker-tracing-otlp-header=[headers to attach to each tracing message]:header'
|
||||
'--worker-tracing-otlp-use-tls[whether to use tls or not]'
|
||||
|
||||
# Runtime Configuration
|
||||
'--worker-runtime=[runtime to use with the worker]:runtime:(guardian containerd houdini)'
|
||||
|
||||
# Guardian Configuration
|
||||
'--worker-garden-bin=[path to a garden server executable]:bin'
|
||||
'--worker-garden-request-timeout=[how long to wait for requests to the Garden server to complete]:time:_concourse_durations'
|
||||
'--worker-garden-config=[path to a config file to use for the Garden backend]:path:_files'
|
||||
|
||||
# DNS Proxy Configuration
|
||||
'--worker-garden-dns-proxy-enable[enable proxy DNS server]'
|
||||
|
||||
# Container Networking
|
||||
'--worker-garden-network-pool=[network range to use for dynamically allocated container subnets]:range'
|
||||
|
||||
# Limits:
|
||||
'--worker-garden-max-containers=[maximum container capacity]:capacity'
|
||||
|
||||
# Containerd Configuration:
|
||||
'--worker-containerd-config=[path to a config file to use for the Containerd daemon]:file:_files'
|
||||
'--worker-containerd-bin=[path to a containerd executable]:file:_files'
|
||||
'--worker-containerd-init-bin=[path to an init executable]:file:_files'
|
||||
'--worker-containerd-seccomp-profile=[path to a seccomp filter override]:file:_files'
|
||||
'--worker-containerd-oci-hooks-dir=[path to the oci hooks dir]:dir:_files -/'
|
||||
'--worker-containerd-cni-plugins-dir=[path to CNI network plugins]:dir:_files -/'
|
||||
'--worker-containerd-request-timeout=[how long to wait for requests to Containerd to complete]: :_concourse_durations'
|
||||
'--worker-containerd-max-containers=[max container capacity]:capacity'
|
||||
'--worker-containerd-privileged-mode=[how many privileges privileged containers get]:type:(full fuse-only ignore)'
|
||||
|
||||
# Containerd Networking
|
||||
"--worker-containerd-external-ip=[IP address to use to reach container's mapped ports]:ip"
|
||||
'--worker-containerd-dns-server=[DNS server IP address to use instead of automatically determined servers]:ip'
|
||||
'--worker-containerd-additional-hosts=[additional entries to add to /etc/hosts in containers]:hosts'
|
||||
'--worker-containerd-restricted-network=[network ranges to which traffic from containers will be restricted]:range'
|
||||
'--worker-containerd-network-pool=[network range to use for dynamically allocated container subnets]:range'
|
||||
'--worker-containerd-mtu=[MTU size for container network interfaces]:size'
|
||||
"--worker-containerd-allow-host-access[allow containers to reach the host's network]"
|
||||
|
||||
# DNS Proxy Configuration:
|
||||
'--worker-containerd-dns-proxy-enable[Enable proxy DNS server]'
|
||||
|
||||
# IPv6 Configuration:
|
||||
'--worker-containerd-v6-enable[enable IPv6 networking]'
|
||||
'--worker-containerd-v6-pool=[IPv6 network range to use for dynamically allocated container addresses]:range'
|
||||
'--worker-containerd-v6-disable-masquerade[Masquerade container traffic with worker address]:address'
|
||||
|
||||
# Baggageclaim Configuration:
|
||||
'--worker-baggageclaim-log-level=[minimum level of logs to see]: :_concourse_log_levels'
|
||||
'--worker-baggageclaim-bind-ip=[IP address on which to listen for API traffic]:ip'
|
||||
'--worker-baggageclaim-bind-port=[port on which to listen for API traffic]:port'
|
||||
'--worker-baggageclaim-debug-bind-ip=[IP address on which to listen for the pprof debugger endpoints]:ip'
|
||||
'--worker-baggageclaim-debug-bind-port=[port on which to listen for the pprof debugger endpoints]:port'
|
||||
'--worker-baggageclaim-p2p-interface-name-pattern=[regular expression to match a network interface for p2p streaming]:pattern'
|
||||
'--worker-baggageclaim-p2p-interface-family=[4 for IPv4 and 6 for IPv6]:type:(4 6)'
|
||||
'--worker-baggageclaim-volumes=[directory in which to place volume data]:dir:_files -/'
|
||||
'--worker-baggageclaim-driver=[driver to use for managing volumes]:type:(detect naive btrfs overlay)'
|
||||
'--worker-baggageclaim-btrfs-bin=[path to btrfs binary]:path:_files'
|
||||
'--worker-baggageclaim-mkfs-bin=[path to mkfs binary]:path:_files'
|
||||
'--worker-baggageclaim-overlays-dir=[path to directory in which to store overlay data]:dir:_files -/'
|
||||
'--worker-baggageclaim-disable-user-namespaces[disable remapping of user/group IDs in unprivileged volumes]:namespace'
|
||||
)
|
||||
|
||||
_arguments \
|
||||
$concourse_web_configurations[@] \
|
||||
"--enable-lets-encrypt[automatically configure TLS certificate via Let's Encrypt/ACME]" \
|
||||
"--lets-encrypt-acme-url=[URL of ACME CA directory endpoint]:url:_urls" \
|
||||
$concourse_postgres_configurations[@] \
|
||||
$concourse_credential_manager_configurations[@] \
|
||||
'--signing-key-check-interval=[how often to check for outdated or expired signing keys(default: 10m)]: :_concourse_durations' \
|
||||
'--signing-key-rotation-period=[after which time a new signing key for the idtoken secrets provider should be generated]: :_concourse_durations' \
|
||||
'--signing-key-rotation-period=[how long a key should still be published after a new key has been generated]: :_concourse_durations' \
|
||||
$concourse_container_placement_strategy_configurations[@] \
|
||||
$concourse_metric_configurations[@] \
|
||||
$concourse_tracing_configurations[@] \
|
||||
$concourse_policy_check_agent_configurations[@] \
|
||||
$concourse_web_server_configurations[@] \
|
||||
$concourse_gc_configurations[@] \
|
||||
$concourse_syslog_configurations[@] \
|
||||
$concourse_authentication_configurations[@] \
|
||||
$concourse_feature_flags[@] \
|
||||
$concourse_tsa_configurations[@] \
|
||||
$concourse_worker_configurations[@]
|
||||
}
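# `concourse quickstart` runs a web node and a worker node in a single process,
# so its completion above combines the shared web-node arrays with
# $concourse_worker_configurations[@] in one _arguments call.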
|
||||
|
||||
(( $+functions[_concourse_retire-worker] )) ||
|
||||
_concourse_retire-worker() {
|
||||
_arguments \
|
||||
'(- : *)'{-h,--help}'[display help information]' \
|
||||
'--name=[the name of the worker you wish to retire]:worker name' \
|
||||
'--team=[the team name of the worker you wish to retire]:team name' \
|
||||
'*--tsa-host=[TSA host to forward the worker through]: :_concourse_host_colon_ports' \
|
||||
'--tsa-public-key=[file containing a public key to expect from the TSA]: :_files' \
|
||||
'--tsa-worker-private-key=[file containing the private key to use when authenticating to the TSA]: :_files'
|
||||
}
|
||||
|
||||
(( $+functions[_concourse_web_args] )) ||
|
||||
_concourse_web_args() {
|
||||
_arguments -C \
|
||||
(( $+functions[_concourse_web] )) ||
|
||||
_concourse_web() {
|
||||
local -a placement_strategies=(
|
||||
volume-locality random fewest-build-containers limit-active-tasks limit-active-containers
|
||||
limit-active-volumes
|
||||
)
|
||||
local -a second_placement_strategies=(
|
||||
random fewest-build-containers limit-active-tasks limit-active-containers limit-active-volumes
|
||||
)
|
||||
|
||||
_arguments \
|
||||
'(- : *)'{-h,--help}'[display help information]' \
|
||||
'--peer-address=[network address of this web node, reachable by other web nodes]: :_concourse_host_colon_ports' \
|
||||
'--log-level=[minimum level of logs to see]: :_concourse_log_levels' \
|
||||
'--bind-ip=[IP address on which to listen for web traffic]: :_concourse_ip_addresses' \
|
||||
'--bind-port=[port on which to listen for HTTP traffic]: :_concourse_ports' \
|
||||
'--tls-bind-port=[port on which to listen for HTTPS traffic]: :_concourse_ports' \
|
||||
'--tls-cert=[file containing an SSL certificate]: :_files' \
|
||||
'--tls-key=[file containing an RSA private key, used to encrypt HTTPS traffic]: :_files' \
|
||||
'--external-url=[URL used to reach any ATC from the outside world]: :_urls' \
|
||||
'--encryption-key=[a 16 or 32 length key used to encrypt sensitive information before storing it in the database]:encryption key' \
|
||||
'--old-encryption-key=[encryption key previously used for encrypting sensitive information]:encryption key' \
|
||||
'--debug-bind-ip=[IP address on which to listen for the pprof debugger endpoints]: :_concourse_ip_addresses' \
|
||||
'--debug-bind-port=[port on which to listen for the pprof debugger endpoints]: :_concourse_ports' \
|
||||
'--intercept-idle-timeout=[length of time for an intercepted session to be idle before terminating]: :_concourse_durations' \
|
||||
'--enable-global-resources[enable equivalent resources across pipelines and teams to share a single version history]' \
|
||||
'--global-resource-check-timeout=[time limit on checking for new versions of resources]: :_concourse_durations' \
|
||||
'--resource-checking-interval=[interval on which to check for new versions of resources]: :_concourse_durations' \
|
||||
'--resource-type-checking-interval=[interval on which to check for new versions of resource types]: :_concourse_durations' \
|
||||
'--container-placement-strategy=[method by which a worker is selected during container placement]:strategy:((volume-locality random fewest-build-containers))' \
|
||||
'--baggageclaim-response-header-timeout=[how long to wait for Baggageclaim to send the response header]: :_concourse_durations' \
|
||||
'--cli-artifacts-dir=[directory containing downloadable CLI binaries]: :_files -/' \
|
||||
'--log-db-queries[log database queries]' \
|
||||
'--build-tracker-interval=[interval on which to run build tracking]: :_concourse_durations' \
|
||||
'--default-build-logs-to-retain=[default build logs to retain, 0 means all]:number' \
|
||||
'--max-build-logs-to-retain=[maximum build logs to retain, 0 means not specified]:number' \
|
||||
'--default-days-to-retain-build-logs=[default days to retain build logs. 0 means unlimited]:number' \
|
||||
'--max-days-to-retain-build-logs=[maximum days to retain build logs, 0 means not specified]:number' \
|
||||
'--default-task-cpu-limit=[default max number of cpu shares per task, 0 means unlimited]:number' \
|
||||
'--default-task-memory-limit=[default maximum memory per task, 0 means unlimited]:number' \
|
||||
'--enable-build-auditing[enable auditing for all api requests connected to builds]' \
|
||||
'--enable-container-auditing[enable auditing for all api requests connected to containers]' \
|
||||
'--enable-job-auditing[enable auditing for all api requests connected to jobs]' \
|
||||
'--enable-pipeline-auditing[enable auditing for all api requests connected to pipelines]' \
|
||||
'--enable-resource-auditing[enable auditing for all api requests connected to resources]' \
|
||||
'--enable-system-auditing[enable auditing for all api requests connected to system transactions]' \
|
||||
'--enable-team-auditing[enable auditing for all api requests connected to teams]' \
|
||||
'--enable-worker-auditing[enable auditing for all api requests connected to workers]' \
|
||||
'--enable-volume-auditing[enable auditing for all api requests connected to volumes]' \
|
||||
'--postgres-host=[the host to connect to]: :_hosts' \
|
||||
'--postgres-port=[the port to connect to]: :_concourse_ports' \
|
||||
'--postgres-socket=[path to a UNIX domain socket to connect to]: :_files' \
|
||||
'--postgres-user=[the user to sign in as]: :_users' \
|
||||
'--postgres-password=[the user'\''s password]:password' \
|
||||
'--postgres-sslmode=[whether or not to use SSL]:SSL mode:((disable require verify-ca verify-full))' \
|
||||
'--postgres-ca-cert=[CA cert file location, to verify when connecting with SSL]: :_files' \
|
||||
'--postgres-client-cert=[client cert file location]: :_files' \
|
||||
'--postgres-client-key=[client key file location]: :_files' \
|
||||
'--postgres-connect-timeout=[dialing timeout]: :_concourse_durations' \
|
||||
'--postgres-database=[the name of the database to use]:database name' \
|
||||
'--secret-retry-attempts=[the number of attempts secret will be retried to be fetched, in case a retriable error happens]:number' \
|
||||
'--secret-retry-interval=[the interval between secret retry retrieval attempts]: :_concourse_durations' \
|
||||
'--secret-cache-enabled[enable in-memory cache for secrets]' \
|
||||
'--secret-cache-duration=[if the cache is enabled, secret values will be cached for not longer than this duration]: :_concourse_durations' \
|
||||
'--secret-cache-purge-interval=[if the cache is enabled, expired items will be removed on this interval]: :_concourse_durations' \
|
||||
'--credhub-url=[CredHub server address used to access secrets]: :_urls' \
|
||||
'--credhub-path-prefix=[path under which to namespace credential lookup]:path' \
'--credhub-ca-cert=[path to PEM-encoded CA cert files to use to verify the CredHub server SSL cert]: :_files' \
'--credhub-client-cert=[path to the client certificate for mutual TLS authorization]: :_files' \
'--credhub-client-key=[path to the client private key for mutual TLS authorization]: :_files' \
'--credhub-insecure-skip-verify[enable insecure SSL verification]' \
'--credhub-client-id=[client ID for CredHub authorization]:client ID' \
'--credhub-client-secret=[client secret for CredHub authorization]:client secret' \
'--kubernetes-in-cluster[enables the in-cluster client]' \
'--kubernetes-config-path=[path to Kubernetes config when running ATC outside Kubernetes]: :_files' \
'--kubernetes-namespace-prefix=[prefix to use for Kubernetes namespaces under which secrets will be looked up]:prefix' \
'--aws-secretsmanager-access-key=[AWS Access key ID]:access key' \
'--aws-secretsmanager-secret-key=[AWS Secret Access Key]:secret key' \
'--aws-secretsmanager-session-token=[AWS Session Token]:session token' \
'--aws-secretsmanager-region=[AWS region to send requests to]:region' \
'--aws-secretsmanager-pipeline-secret-template=[AWS Secrets Manager secret identifier template used for pipeline specific parameter]:template' \
'--aws-secretsmanager-team-secret-template=[AWS Secrets Manager secret identifier template used for team specific parameter]:template' \
'--aws-ssm-access-key=[AWS Access key ID]:access key' \
'--aws-ssm-secret-key=[AWS Secret Access Key]:secret key' \
'--aws-ssm-session-token=[AWS Session Token]:session token' \
'--aws-ssm-region=[AWS region to send requests to]:region' \
'--aws-ssm-pipeline-secret-template=[AWS SSM parameter name template used for pipeline specific parameter]:template' \
'--aws-ssm-team-secret-template=[AWS SSM parameter name template used for team specific parameter]:template' \
'--vault-url=[vault server address used to access secrets]: :_urls' \
'--vault-path-prefix=[path under which to namespace credential lookup]:prefix' \
'--vault-shared-path=[path under which to lookup shared credentials]:path' \
'--vault-ca-cert=[path to a PEM-encoded CA cert file to use to verify the vault server SSL cert]: :_files' \
'--vault-ca-path=[path to a directory of PEM-encoded CA cert files to verify the vault server SSL cert]: :_files -/' \
'--vault-client-cert=[path to the client certificate for Vault authorization]: :_files' \
'--vault-client-key=[path to the client private key for Vault authorization]: :_files' \
'--vault-server-name=[if set, is used to set the SNI host when connecting via TLS]:server name' \
'--vault-insecure-skip-verify[enable insecure SSL verification]' \
'--vault-client-token=[client token for accessing secrets within the Vault server]:client token' \
'--vault-auth-backend=[auth backend to use for logging in to Vault]:auth backend' \
'--vault-auth-backend-max-ttl=[time after which to force a re-login]: :_concourse_durations' \
'--vault-retry-max=[the maximum time between retries when logging in or re-authing a secret]: :_concourse_durations' \
'--vault-retry-initial=[the initial time between retries when logging in or re-authing a secret]: :_concourse_durations' \
'*--vault-auth-param=[parameter to pass when logging in via the backend]: :_concourse_name_colon_values' \
{-n,--noop}'[don'\''t actually do any automatic scheduling or checking]' \
'--worker-garden-url=[a Garden API endpoint to register as a worker]: :_urls' \
'--worker-baggageclaim-url=[a Baggageclaim API endpoint to register with the worker]: :_urls' \
'*--worker-resource=[a resource type to advertise for the worker]: :_concourse_type_colon_images' \
'--metrics-host-name=[host string to attach to emitted metrics]: :_hosts' \
'*--metrics-attribute=[a key-value attribute to attach to emitted metrics]: :_concourse_name_colon_values' \
'--capture-error-metrics[enable capturing of error log metrics]' \
'--datadog-agent-host=[datadog agent host to expose dogstatsd metrics]: :_hosts' \
'--datadog-agent-port=[datadog agent port to expose dogstatsd metrics]: :_concourse_ports' \
'--datadog-prefix=[prefix for all metrics to easily find them in Datadog]:prefix' \
'--influxdb-url=[influxDB server address to emit points to]: :_urls' \
'--influxdb-database=[influxDB database to write points to]:database name' \
'--influxdb-username=[influxDB server username]: :_users' \
'--influxdb-password=[influxDB server password]:password' \
'--influxdb-insecure-skip-verify[skip SSL verification when emitting to InfluxDB]' \
'--emit-to-logs[emit metrics to logs]' \
'--newrelic-account-id=[new Relic Account ID]:account ID' \
'--newrelic-api-key=[new Relic Insights API Key]:API key' \
'--newrelic-service-prefix=[an optional prefix for emitted New Relic events]:prefix' \
'--prometheus-bind-ip=[IP to listen on to expose Prometheus metrics]: :_concourse_ip_addresses' \
'--prometheus-bind-port=[port to listen on to expose Prometheus metrics]: :_concourse_ports' \
'--riemann-host=[riemann server address to emit metrics to]: :_hosts' \
'--riemann-port=[port of the Riemann server to emit metrics to]: :_concourse_ports' \
'--riemann-service-prefix=[an optional prefix for emitted Riemann services]:prefix' \
'*--riemann-tag=[tag to attach to emitted metrics]:tag' \
'--x-frame-options=[the value to set for X-Frame-Options]:options' \
'--cluster-name=[a name for this Concourse cluster, to be displayed on the dashboard page]:name' \
'--gc-interval=[interval on which to perform garbage collection]: :_concourse_durations' \
'--gc-one-off-grace-period=[period after which one-off build containers will be garbage-collected]: :_concourse_durations' \
'--gc-missing-grace-period=[period after which to reap containers and volumes that were created but went missing from the worker]: :_concourse_durations' \
'--syslog-hostname=[client hostname with which the build logs will be sent to the syslog server]: :_hosts' \
'--syslog-address=[remote syslog server address with port]: :_concourse_host_colon_ports' \
'--syslog-transport=[transport protocol for syslog messages]:protocol:((tcp udp tls))' \
'--syslog-drain-interval=[interval over which checking is done for new build logs to send to syslog server]: :_concourse_durations' \
'--syslog-ca-cert=[paths to PEM-encoded CA cert files to use to verify the Syslog server SSL cert]: :_files' \
'--cookie-secure[force sending secure flag on http cookies]' \
'--auth-duration=[length of time for which tokens are valid]: :_concourse_durations' \
'--session-signing-key=[file containing an RSA private key, used to sign auth tokens]: :_files' \
'*--add-local-user=[list of username:password combinations for all your local users]: :_concourse_username_colon_passwords' \
'*--main-team-local-user=[list of whitelisted local concourse users]: :_users' \
{-c,--main-team-config=}'[configuration file for specifying team params]: :_concourse_config_files' \
'*--main-team-bitbucket-cloud-user=[list of whitelisted Bitbucket Cloud users]: :_users' \
'*--main-team-bitbucket-cloud-team=[list of whitelisted Bitbucket Cloud teams]:team' \
'*--main-team-cf-user=[list of whitelisted CloudFoundry users]: :_users' \
'*--main-team-cf-org=[list of whitelisted CloudFoundry orgs]:org name' \
'*--main-team-cf-space=[list of whitelisted CloudFoundry spaces]:space name' \
'*--main-team-github-user=[list of whitelisted GitHub users]: :_users' \
'*--main-team-github-org=[list of whitelisted GitHub orgs]:org name' \
'*--main-team-github-team=[list of whitelisted GitHub teams]:team name' \
'*--main-team-gitlab-user=[list of whitelisted GitLab users]: :_users' \
'*--main-team-gitlab-group=[list of whitelisted GitLab groups]:group name' \
'*--main-team-ldap-user=[list of whitelisted LDAP users]: :_users' \
'*--main-team-ldap-group=[list of whitelisted LDAP groups]:group name' \
'*--main-team-oauth-user=[list of whitelisted OAuth2 users]: :_users' \
'*--main-team-oauth-group=[list of whitelisted OAuth2 groups]:group name' \
'*--main-team-oidc-user=[list of whitelisted OIDC users]: :_users' \
'*--main-team-oidc-group=[list of whitelisted OIDC groups]:group name' \
'--bitbucket-cloud-client-id=[client id]:client ID' \
'--bitbucket-cloud-client-secret=[client secret]:client secret' \
'--cf-client-id=[client id]:client ID' \
'--cf-client-secret=[client secret]:client secret' \
'--cf-api-url=[the base API URL of your CF deployment]: :_urls' \
'--cf-ca-cert=[CA Certificate]: :_files' \
'--cf-skip-ssl-validation[skip SSL validation]' \
'--github-client-id=[client id]:client ID' \
'--github-client-secret=[client secret]:client secret' \
'--github-host=[hostname of GitHub Enterprise deployment]: :_hosts' \
'--github-ca-cert=[CA certificate of GitHub Enterprise deployment]: :_files' \
'--gitlab-client-id=[client id]:client ID' \
'--gitlab-client-secret=[client secret]:client secret' \
'--gitlab-host=[hostname of Gitlab Enterprise deployment]: :_hosts' \
'--ldap-display-name=[the auth provider name displayed to users on the login page]:display name' \
'--ldap-host=[the host and optional port of the LDAP server]: :_hosts' \
'--ldap-bind-dn=[bind DN for searching LDAP users and groups]:bind DN' \
'--ldap-bind-pw=[bind password for the user specified by bind-dn]:bind password' \
'--ldap-insecure-no-ssl[required if LDAP host does not use TLS]' \
'--ldap-insecure-skip-verify[skip certificate verification]' \
'--ldap-start-tls[start on insecure port, then negotiate TLS]' \
'--ldap-ca-cert=[CA certificate]: :_files' \
'--ldap-user-search-base-dn=[baseDN to start the search from]:baseDN' \
'--ldap-user-search-filter=[optional filter to apply when searching the directory]:filter' \
'--ldap-user-search-username=[attribute to match against the inputted username]:attribute' \
'--ldap-user-search-scope=[can either be: '\''sub'\'' - search the whole sub tree or '\''one'\'' - only search one level]:scope:((sub one))' \
'--ldap-user-search-id-attr=[a mapping of attributes on the user entry to claims]:attribute mapping' \
'--ldap-user-search-email-attr=[a mapping of attributes on the user entry to claims]:attribute mapping' \
'--ldap-user-search-name-attr=[a mapping of attributes on the user entry to claims]:attribute mapping' \
'--ldap-group-search-base-dn=[baseDN to start the search from]:baseDN' \
'--ldap-group-search-filter=[optional filter to apply when searching the directory]:filter' \
'--ldap-group-search-scope=[can either be: '\''sub'\'' - search the whole sub tree or '\''one'\'' - only search one level]:scope:((sub one))' \
'--ldap-group-search-user-attr=[adds an additional requirement to the filter that an attribute in the group match the user'\''s attribute value]:attribute' \
'--ldap-group-search-group-attr=[adds an additional requirement to the filter that an attribute in the group match the user'\''s attribute value]:attribute' \
'--ldap-group-search-name-attr=[the attribute of the group that represents its name]:attribute' \
'--oauth-display-name=[the auth provider name displayed to users on the login page]:display name' \
'--oauth-client-id=[client id]:client ID' \
'--oauth-client-secret=[client secret]:client secret' \
'--oauth-auth-url=[Authorization URL]: :_urls' \
'--oauth-token-url=[Token URL]: :_urls' \
'--oauth-userinfo-url=[UserInfo URL]: :_urls' \
'*--oauth-scope=[any additional scopes that need to be requested during authorization]:scope' \
'--oauth-groups-key=[the groups key indicates which claim to use to map external groups to Concourse teams]:group key' \
'--oauth-user-id-key=[the user id key indicates which claim to use to map an external user id to a Concourse user id]:id key' \
'--oauth-user-name-key=[the user name key indicates which claim to use to map an external user name to a Concourse user name]:name key' \
'--oauth-ca-cert=[CA Certificate]: :_files' \
'--oauth-skip-ssl-validation[skip SSL validation]' \
'--oidc-display-name=[the auth provider name displayed to users on the login page]:display name' \
'--oidc-issuer=[An OIDC issuer URL that will be used to discover provider configuration]: :_urls' \
'--oidc-client-id=[client id]:client ID' \
'--oidc-client-secret=[client secret]:client secret' \
'*--oidc-scope=[any additional scopes that need to be requested during authorization]:scope' \
'--oidc-groups-key=[the groups key indicates which claim to use to map external groups to Concourse teams]:group key' \
'--oidc-user-name-key=[the user name key indicates which claim to use to map an external user name to a Concourse user name]:user name key' \
'*--oidc-hosted-domains=[list of whitelisted domains when using Google, only users from a listed domain will be allowed to log in]:domain' \
'--oidc-ca-cert=[CA Certificate]: :_files' \
'--oidc-skip-ssl-validation[skip SSL validation]' \
'--tsa-log-level=[minimum level of logs to see]: :_concourse_log_levels' \
'--tsa-bind-ip=[IP address on which to listen for SSH]: :_concourse_ip_addresses' \
'--tsa-peer-address=[network address of this web node, reachable by other web nodes]: :_urls' \
'--tsa-bind-port=[port on which to listen for SSH]: :_concourse_ports' \
'--tsa-debug-bind-ip=[IP address on which to listen for the pprof debugger endpoints]: :_concourse_ip_addresses' \
'--tsa-debug-bind-port=[port on which to listen for the pprof debugger endpoints]: :_concourse_ports' \
'--tsa-host-key=[path to private key to use for the SSH server]: :_files' \
'--tsa-authorized-keys=[path to file containing keys to authorize, in SSH authorized_keys format]: :_files' \
'--tsa-team-authorized-keys=[path to file containing keys to authorize, in SSH authorized_keys format]: :_concourse_name_colon_paths' \
'--tsa-atc-url=[ATC API endpoints to which workers will be registered]: :_urls' \
'--tsa-session-signing-key=[path to private key to use when signing tokens in requests to the ATC during registration]: :_files' \
'--tsa-heartbeat-interval=[interval on which to heartbeat workers to the ATC]: :_concourse_durations' \
$concourse_web_configurations[@] \
"--enable-lets-encrypt[automatically configure TLS certificate via Let's Encrypt/ACME]" \
"--lets-encrypt-acme-url=[URL of ACME CA directory endpoint]:url:_urls" \
$concourse_postgres_configurations[@] \
$concourse_credential_manager_configurations[@] \
'--signing-key-check-interval=[how often to check for outdated or expired signing keys(default: 10m)]: :_concourse_durations' \
'--signing-key-rotation-period=[after which time a new signing key for the idtoken secrets provider should be generated]: :_concourse_durations' \
'--signing-key-grace-period=[how long a key should still be published after a new key has been generated]: :_concourse_durations' \
$concourse_container_placement_strategy_configurations[@] \
$concourse_metric_configurations[@] \
$concourse_tracing_configurations[@] \
$concourse_policy_check_agent_configurations[@] \
$concourse_web_server_configurations[@] \
$concourse_gc_configurations[@] \
$concourse_syslog_configurations[@] \
$concourse_authentication_configurations[@] \
$concourse_feature_flags[@] \
$concourse_tsa_configurations[@]
}

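# Each option spec above follows the standard zsh _arguments form
# '--flag=[description]:message:action'; a leading '*' marks an option that
# may be given more than once, and the action (_files, _hosts, _urls, ...)
# supplies the completion candidates.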
(( $+functions[_concourse_worker_args] )) ||
_concourse_worker_args() {
_arguments -C \
(( $+functions[_concourse_worker] )) ||
_concourse_worker() {
_arguments \
'(- : *)'{-h,--help}'[display help information]' \
'--name=[the name to set for the worker during registration]:name' \
'*--tag=[a tag to set during registration]:tag' \
@@ -361,18 +734,43 @@ _concourse_worker_args() {
'*--tsa-host=[TSA host to forward the worker through]: :_hosts' \
'--tsa-public-key=[file containing a public key to expect from the TSA]: :_files' \
'--tsa-worker-private-key=[file containing the private key to use when authenticating to the TSA]: :_files' \
'--garden-use-houdini[use the insecure Houdini Garden backend]' \
'--garden-bin=[path to gdn executable (or leave as gdn to find it in $PATH)]: :_files' \
'--runtime=[runtime to use with the worker(default: guardian)]:runtime:(guardian containerd houdini)' \
'--garden-bin=[path to gdn executable (or leave as gdn to find it in $PATH)]: :_path_commands' \
'--garden-request-timeout=[how long to wait for requests to the Garden server to complete]:time' \
'--garden-config=[path to a config file to use for Garden]: :_files' \
'--garden-dns-proxy-enable[enable proxy DNS server]' \
'--garden-network-pool=[network range to use for dynamically allocated container subnets]:range' \
'--garden-max-containers=[maximum container capacity for Garden]:limit' \
'--containerd-config=[path to a config file to use for containerd]:path:_files' \
'--containerd-bin=[path to a containerd executable]:command:_path_commands' \
'--containerd-init-bin=[path to an init executable]:init:_files' \
'--containerd-seccomp-profile=[path to a seccomp filter override]:filter:_files' \
'--containerd-oci-hooks-dir=[path to the oci hooks dir]:dir:_files -/' \
'--containerd-cni-plugins-dir=[path to CNI network plugins]:dir:_files -/' \
'--containerd-request-timeout=[how long to wait for requests to Containerd to complete]:timeout' \
'--containerd-max-containers=[max container capacity for containerd]:limit' \
'--containerd-privileged-mode=[how many privileges privileged containers get]:type:(full fuse-only ignore)' \
"--containerd-external-ip=[IP address to use to reach container's mapped ports]:ip" \
'--containerd-dns-server=[DNS server IP address to use instead of automatically determined servers]:ip' \
'--containerd-additional-hosts=[additional entries to add to /etc/hosts in containers]:hosts' \
'--containerd-restricted-network=[network ranges to which traffic from containers will be restricted]:range' \
'--containerd-network-pool=[network range to use for dynamically allocated container subnets]:range' \
'--containerd-mtu=[MTU size for container network interfaces]:size' \
"--containerd-allow-host-access[allow containers to reach the host's network]" \
'--containerd-dns-proxy-enable[enable proxy DNS server]' \
'--containerd-v6-enable[enable IPv6 networking]' \
'--containerd-v6-pool=[IPv6 network range to use for dynamically allocated container addresses]:range' \
'--containerd-v6-disable-masquerade[do not masquerade container traffic with the worker address]' \
'--baggageclaim-log-level=[minimum level of logs to see]: :_concourse_log_levels' \
'--baggageclaim-bind-ip=[IP address on which to listen for API traffic]: :_concourse_ip_addresses' \
'--baggageclaim-bind-port=[port on which to listen for API traffic]: :_concourse_ports' \
'--baggageclaim-debug-bind-ip=[IP address on which to listen for the pprof debugger endpoints]: :_concourse_ip_addresses' \
'--baggageclaim-debug-bind-port=[port on which to listen for the pprof debugger endpoints]: :_concourse_ports' \
'--baggageclaim-p2p-interface-name-pattern=[regular expression to match a network interface for p2p streaming]:pattern' \
'--baggageclaim-p2p-interface-family=[IPv4 or IPv6(default: IPv4)]:type:(4 6)' \
'--baggageclaim-volumes=[directory in which to place volume data]: :_files -/' \
'--baggageclaim-driver=[driver to use for managing volumes]:driver:((detect naive btrfs overlay))' \
'--baggageclaim-btrfs-bin=[path to btrfs binary]: :_files' \
'--baggageclaim-driver=[driver to use for managing volumes]:driver:(detect naive btrfs overlay)' \
'--baggageclaim-btrfs-bin=[path to btrfs binary]: :_path_commands' \
'--baggageclaim-mkfs-bin=[path to mkfs.btrfs binary]: :_files' \
'--baggageclaim-overlays-dir=[path to directory in which to store overlay data]: :_files -/' \
'--baggageclaim-disable-user-namespaces[disable remapping of user/group IDs in unprivileged volumes]'
@@ -467,10 +865,7 @@ _concourse_log_levels() {
_describe -t log-levels 'log level' levels
}

case $service in
concourse) _concourse_server "$@" ;;
*) _message "unknown command ${service}" && ret=1 ;;
esac
_concourse_server "$@"

# Local Variables:
# mode: Shell-Script