#######
# NEW URL FOR THIS FILE: https://argo-workflows.readthedocs.io/en/latest/workflow-controller-configmap.yaml
#######

# This file describes the config settings available in the workflow controller configmap
apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
data:
  # instanceID is a label selector to limit the controller's watch to a specific instance. It
  # contains an arbitrary value that is carried forward into its pod labels, under the key
  # workflows.argoproj.io/controller-instanceid, for the purposes of workflow segregation. This
  # enables a controller to only receive workflow and pod events that it is interested in,
  # in order to support multiple controllers in a single cluster, and ultimately allows the
  # controller itself to be bundled as part of a higher-level application. If omitted, the
  # controller watches workflows and pods that *are not* labeled with an instance id.
  instanceID: my-ci-controller
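
  # For illustration only (not a config key): with instanceID set as above, this controller
  # only processes Workflows that carry the matching instance-id label. A minimal sketch of
  # such a Workflow (the name and image are illustrative):
  #
  # apiVersion: argoproj.io/v1alpha1
  # kind: Workflow
  # metadata:
  #   generateName: example-
  #   labels:
  #     workflows.argoproj.io/controller-instanceid: my-ci-controller
  # spec:
  #   entrypoint: main
  #   templates:
  #     - name: main
  #       container:
  #         image: argoproj/argosay:v2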

  # Namespace is a label selector filter to limit the controller's watch to a specific namespace
  namespace: my-namespace

  # Parallelism limits the max total parallel workflows that can execute at the same time
  # (available since Argo v2.3). The controller must be restarted for changes to take effect.
  parallelism: "10"

  # Limit the maximum number of incomplete workflows in a namespace.
  # Intended for cluster installs that are multi-tenant environments, to prevent too many workflows in one
  # namespace impacting others.
  # >= v3.2
  namespaceParallelism: "10"

  # Globally limits the rate at which pods are created.
  # This is intended to mitigate flooding of the Kubernetes API server by workflows with a large number of
  # parallel nodes.
  resourceRateLimit: |
    limit: 10
    burst: 1

  # Whether or not to emit events on node completion. These can take up a lot of space in
  # k8s (typically etcd), resulting in errors when trying to create new events:
  # "Unable to create audit event: etcdserver: mvcc: database space exceeded"
  # This config item allows you to disable this.
  # (since v2.9)
  nodeEvents: |
    enabled: true

  # uncomment the following lines if the workflow controller runs in a different k8s cluster than the
  # workflow workloads, or needs to communicate with the k8s apiserver using an out-of-cluster
  # kubeconfig secret
  # kubeConfig:
  #   # name of the kubeconfig secret, may not be empty when kubeConfig is specified
  #   secretName: kubeconfig-secret
  #   # key of the kubeconfig secret, may not be empty when kubeConfig is specified
  #   secretKey: kubeconfig
  #   # mounting path of the kubeconfig secret, defaults to /kube/config
  #   mountPath: /kubeconfig/mount/path
  #   # volume name when mounting the secret, defaults to kubeconfig
  #   volumeName: kube-config-volume

  links: |
    # Adds a button to the workflow page. E.g. linking to your logging facility.
    - name: Example Workflow Link
      scope: workflow
      url: http://logging-facility?namespace=${metadata.namespace}&workflowName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    # Adds a button next to the pod. E.g. linking to your logging facility, but for the pod only.
    - name: Example Pod Link
      scope: pod
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    - name: Pod Logs
      scope: pod-logs
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    - name: Event Source Logs
      scope: event-source-logs
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    - name: Sensor Logs
      scope: sensor-logs
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    # Adds a button to the bottom right of every page to link to your organisation's help or chat.
    - name: Get help
      scope: chat
      url: http://my-chat
    # Adds a button to the top of the workflow view to navigate to customized views.
    - name: Completed Workflows
      scope: workflow-list
      url: http://workflows?label=workflows.argoproj.io/completed=true
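
  # For illustration only: given the "Example Workflow Link" above, a workflow named my-wf
  # in namespace my-ns (timestamps are illustrative) would produce a URL along the lines of:
  #
  #   http://logging-facility?namespace=my-ns&workflowName=my-wf&startedAt=2023-10-04T10:12:21Z&finishedAt=2023-10-04T10:13:00Z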

  # Columns are custom columns that will be exposed in the Workflow List View.
  # (available since Argo v3.5)
  columns: |
    # Adds a column to the Workflow List View
    - # The name of this column, e.g., "Workflow Completed".
      name: Workflow Completed
      # The type of this column, "label" or "annotation".
      type: label
      # The key of the label or annotation, e.g., "workflows.argoproj.io/completed".
      key: workflows.argoproj.io/completed

  # uncomment the following lines if you want to change the navigation bar background color
  # navColor: red

  # artifactRepository defines the default location to be used as the artifact repository for
  # container artifacts.
  artifactRepository: |
    # archiveLogs will archive the main container logs as an artifact
    archiveLogs: true

    s3:
      # Use the corresponding endpoint depending on your S3 provider:
      #   AWS: s3.amazonaws.com
      #   GCS: storage.googleapis.com
      #   Minio: my-minio-endpoint.default:9000
      endpoint: s3.amazonaws.com
      bucket: my-bucket
      region: us-west-2
      # insecure will disable TLS. Primarily used for minio installs not configured with TLS
      insecure: false
      # keyFormat is a format pattern to define how artifacts will be organized in a bucket.
      # It can reference workflow metadata variables such as workflow.namespace, workflow.name,
      # pod.name. It can also use strftime formatting of workflow.creationTimestamp so that workflow
      # artifacts can be organized by date. If omitted, will use `{{workflow.name}}/{{pod.name}}`,
      # which has the potential for collisions.
      # The following example pattern organizes workflow artifacts under a "my-artifacts" sub dir,
      # then sub dirs for year, month, date and finally workflow name and pod.
      # e.g.: my-artifacts/2018/08/23/my-workflow-abc123/my-workflow-abc123-1234567890
      keyFormat: "my-artifacts\
        /{{workflow.creationTimestamp.Y}}\
        /{{workflow.creationTimestamp.m}}\
        /{{workflow.creationTimestamp.d}}\
        /{{workflow.name}}\
        /{{pod.name}}"
      # The actual secret object (in this example my-s3-credentials) should be created in every
      # namespace where a workflow needs to store its artifacts to S3. If omitted,
      # attempts to use the IAM role to access the bucket (instead of accessKey/secretKey).
      accessKeySecret:
        name: my-s3-credentials
        key: accessKey
      secretKeySecret:
        name: my-s3-credentials
        key: secretKey
      # If this is set to true, argo workflows will use the AWS SDK default credentials provider chain. This will allow things like
      # IRSA and any of the authentication methods that the golang SDK uses in its default chain.
      # If you are using IRSA on AWS and set this option to true, you will also need to modify the Argo Server Deployment with the
      # `spec.template.spec.securityContext.fsGroup: 65534` configuration. This is required for IRSA to be able to access the
      # `/var/run/secrets/eks.amazonaws.com/serviceaccount/token` file, and authenticate with AWS.
      useSDKCreds: false
      encryptionOptions:
        # If this is set to true, SSE-S3 encryption will be used to store objects
        # unless kmsKeyId or serverSideCustomerKeySecret is set
        enableEncryption: false
        # A valid kms key id. If this value is set, the object stored in s3 will be encrypted with SSE-KMS
        # Note: You cannot set both kmsKeyId and serverSideCustomerKeySecret
        # kmsKeyId: ''
        # Allows you to set a json blob of simple key value pairs. See
        # https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
        # for more information
        # kmsEncryptionContext: ''
        # The actual secret object (in this example my-s3-credentials)
        # should be created when using a custom secret to encrypt objects using SSE-C
        # Note: You cannot set both kmsKeyId and serverSideCustomerKeySecret
        # serverSideCustomerKeySecret:
        #   name: my-s3-credentials
        #   key: secretKey

  # Specifies the container runtime interface to use (default: emissary)
  # must be one of: docker, kubelet, k8sapi, pns, emissary
  # It has lower precedence than both `--container-runtime-executor` and `containerRuntimeExecutors`.
  # (removed in v3.4)
  containerRuntimeExecutor: emissary

  # Specifies the executor to use.
  #
  # You can use this to:
  # * Tailor your executor based on your preference for security or performance.
  # * Test out an executor without committing yourself to use it for every workflow.
  #
  # To find out which executor was actually used, see the `wait` container logs.
  #
  # The list is in order of precedence; the first matching executor is used.
  # This has precedence over `containerRuntimeExecutor`.
  # (removed in v3.4)
  containerRuntimeExecutors: |
    - name: emissary
      selector:
        matchLabels:
          workflows.argoproj.io/container-runtime-executor: emissary
    - name: pns
      selector:
        matchLabels:
          workflows.argoproj.io/container-runtime-executor: pns

  # Specifies the location of docker.sock on the host for the docker executor (default: /var/run/docker.sock)
  # (available v2.4-v3.3)
  dockerSockPath: /var/someplace/else/docker.sock

  # kubelet port when using the kubelet executor (default: 10250). The kubelet executor is deprecated; use emissary instead.
  # (removed in v3.4)
  kubeletPort: "10250"

  # disable TLS verification of the kubelet executor (default: false)
  # (removed in v3.4)
  kubeletInsecure: "false"

  # The command/args for each image, needed when the command is not specified and the emissary executor is used.
  # https://argoproj.github.io/argo-workflows/workflow-executors/#emissary-emissary
  images: |
    argoproj/argosay:v2:
      cmd: [/argosay]
    docker/whalesay:latest:
      cmd: [/bin/bash]
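
  # For illustration only (not a config key): with the images entry above, a template can
  # omit `command` and the emissary executor resolves it from this config. A sketch of such
  # a template (the name and args are illustrative):
  #
  # - name: say
  #   container:
  #     image: argoproj/argosay:v2  # `command` omitted; resolved to [/argosay] via images above
  #     args: ["echo", "hello"]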

  # Defaults for main containers. These can be overridden by the template.
  # <= v3.3: only `resources` is supported.
  # >= v3.4: all fields are supported, including the security context.
  mainContainer: |
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 0.1
        memory: 64Mi
      limits:
        cpu: 0.5
        memory: 512Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
          - ALL
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 1000

  # executor controls how the init and wait containers should be customized
  # (available since Argo v2.3)
  executor: |
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 0.1
        memory: 64Mi
      limits:
        cpu: 0.5
        memory: 512Mi
    # args & env allow command line arguments and environment variables to be appended to the
    # executor container and are mainly used for development/debugging purposes.
    args:
      - --loglevel
      - debug
      - --gloglevel
      - "6"
    env:
      # ARGO_TRACE enables some tracing information for debugging purposes. Currently it enables
      # logging of S3 request/response payloads (including auth headers)
      - name: ARGO_TRACE
        value: "1"

  # metricsConfig controls the path and port for prometheus metrics. Metrics are enabled and emitted on localhost:9090/metrics
  # by default.
  metricsConfig: |
    # Enabled controls metric emission. Default is true, set "enabled: false" to turn off
    enabled: true
    # Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics"
    path: /metrics
    # Port is the port where metrics are emitted. Default is "9090"
    port: 8080
    # MetricsTTL sets how often custom metrics are cleared from memory. Default is "0", metrics are never cleared
    metricsTTL: "10m"
    # IgnoreErrors is a flag that instructs prometheus to ignore metric emission errors. Default is "false"
    ignoreErrors: false
    # Use a self-signed cert for TLS, default false
    secure: false
    # DEPRECATED: Legacy metrics are now removed, this field is ignored
    disableLegacy: false

  # telemetryConfig controls the path and port for prometheus telemetry. Telemetry is enabled and emitted in the same endpoint
  # as metrics by default, but can be overridden using this config.
  telemetryConfig: |
    enabled: true
    path: /telemetry
    port: 8080
    # Use a self-signed cert for TLS, default false
    secure: true
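
  # For illustration only (not a config key): with metricsConfig above, a Prometheus scrape
  # config could look like the following sketch (job name and target address are illustrative):
  #
  # scrape_configs:
  #   - job_name: argo-workflow-controller
  #     metrics_path: /metrics  # matches `path` above
  #     static_configs:
  #       - targets: ["workflow-controller-metrics:8080"]  # matches `port` above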

  # enable persistence using postgres
  persistence: |
    connectionPool:
      maxIdleConns: 100
      maxOpenConns: 0
      connMaxLifetime: 0s # 0 means connections don't have a max lifetime
    # if true, node status is only saved to the persistence DB to avoid the 1MB limit in etcd
    nodeStatusOffLoad: false
    # save completed workflows to the workflow archive
    archive: false
    # the number of days to keep archived workflows (the default is forever)
    archiveTTL: 180d
    # skip database migration if needed.
    # skipMigration: true
    # archiveLabelSelector determines which workflows will be archived: those matching the
    # given matchLabels or match expressions.
    # https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
    archiveLabelSelector:
      matchLabels:
        workflows.argoproj.io/archive-strategy: "always"
    # Optional name of the cluster I'm running in. This must be unique for your cluster.
    clusterName: default
    postgresql:
      host: localhost
      port: 5432
      database: postgres
      tableName: argo_workflows
      # the database secrets must be in the same namespace as the controller
      userNameSecret:
        name: argo-postgres-config
        key: username
      passwordSecret:
        name: argo-postgres-config
        key: password
      ssl: true
      # sslMode must be one of: disable, require, verify-ca, verify-full
      # you can find more information about these ssl options here: https://godoc.org/github.com/lib/pq
      sslMode: require
    # Optional config for mysql:
    # mysql:
    #   host: localhost
    #   port: 3306
    #   database: argo
    #   tableName: argo_workflows
    #   userNameSecret:
    #     name: argo-mysql-config
    #     key: username
    #   passwordSecret:
    #     name: argo-mysql-config
    #     key: password

  # podSpecLogStrategy enables the logging of pod specs in the controller log.
  # podSpecLogStrategy: |
  #   failedPod: true
  #   allPods: false

  # podGCGracePeriodSeconds specifies the duration in seconds before a terminating pod is forcefully killed.
  # Value must be a non-negative integer. A zero value indicates that the pod will be forcefully terminated immediately.
  # Defaults to the Kubernetes default of 30 seconds.
  podGCGracePeriodSeconds: "60"

  # podGCDeleteDelayDuration specifies the duration before pods in the GC queue get deleted.
  # Value must be non-negative. A zero value indicates that the pods will be deleted immediately.
  # Defaults to 5 seconds.
  podGCDeleteDelayDuration: 30s

  # adds an initial delay (for k8s clusters with mutating webhooks) to prevent the workflow from being modified by the MWC.
  # initialDelay: 5s

  # Workflow retention by number of workflows
  # retentionPolicy: |
  #   completed: 10
  #   failed: 3
  #   errored: 3

  # Default values that will apply to all Workflows from this controller, unless overridden at the Workflow level.
  # See more: docs/default-workflow-specs.md
  workflowDefaults: |
    metadata:
      annotations:
        argo: workflows
      labels:
        foo: bar
    spec:
      ttlStrategy:
        secondsAfterSuccess: 5
      parallelism: 3
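
  # For illustration only (not a config key): any Workflow can override these defaults in
  # its own spec; a sketch overriding the default ttlStrategy set above:
  #
  # apiVersion: argoproj.io/v1alpha1
  # kind: Workflow
  # spec:
  #   ttlStrategy:
  #     secondsAfterSuccess: 3600  # replaces the default of 5 from workflowDefaults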

  # SSO Configuration for the Argo server.
  # You must also start the argo server with `--auth-mode sso`.
  # https://argoproj.github.io/argo-workflows/argo-server-auth-mode/
  sso: |
    # This is the root URL of the OIDC provider (required).
    issuer: https://issuer.root.url/
    # Some OIDC providers have alternate root URLs that can be included. These should be reviewed carefully. (optional)
    issuerAlias: https://altissuer.root.url
    # This defines how long your login is valid for (in hours). (optional)
    # If omitted, defaults to 10h. The example below is 10 days.
    sessionExpiry: 240h
    # This is the name of the secret and the key in it that contain the OIDC client
    # ID issued to the application by the provider (required).
    clientId:
      name: client-id-secret
      key: client-id-key
    # This is the name of the secret and the key in it that contain the OIDC client
    # secret issued to the application by the provider (required).
    clientSecret:
      name: client-secret-secret
      key: client-secret-key
    # This is the redirect URL supplied to the provider (optional). It must
    # be in the form <argo-server-root-url>/oauth2/callback. It must be
    # browser-accessible. If omitted, it will be automatically generated.
    redirectUrl: https://argo-server/oauth2/callback
    # Additional scopes to request. Typically needed for SSO RBAC. >= v2.12
    scopes:
      - groups
      - email
      - profile
    # RBAC Config. >= v2.12
    rbac:
      enabled: false
    # Skip TLS verification; not recommended in production environments. Useful for testing purposes. >= v3.2.4
    insecureSkipVerify: false

  # workflowRestrictions restricts the Workflows that the controller will process.
  # Current options:
  # - Strict: Only Workflows using "workflowTemplateRef" will be processed. This allows the administrator of the
  #   controller to set a "library" of templates that may be run by its operator, limiting arbitrary Workflow execution.
  # - Secure: Only Workflows using "workflowTemplateRef" will be processed, and the controller will enforce that the
  #   WorkflowTemplate that is referenced hasn't changed between operations. If you want to make sure the operator of
  #   the Workflow cannot run an arbitrary Workflow, use this option.
  workflowRestrictions: |
    templateReferencing: Strict
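
  # For illustration only (not a config key): with templateReferencing: Strict, only
  # Workflows that reference a WorkflowTemplate are processed. A sketch of a permitted
  # Workflow (the referenced template name is hypothetical):
  #
  # apiVersion: argoproj.io/v1alpha1
  # kind: Workflow
  # metadata:
  #   generateName: from-template-
  # spec:
  #   workflowTemplateRef:
  #     name: my-library-template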