[core]
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository. This path must be absolute.
#
# Variable: AIRFLOW__CORE__DAGS_FOLDER
#
dags_folder = /opt/airflow/dags

# How to obtain the hostname: provide a path to a callable which will resolve the hostname.
# The format is "package.function".
# 
# For example, the default value "airflow.utils.net.getfqdn" means the result comes from a
# patched version of socket.getfqdn() - see https://github.com/python/cpython/issues/49254.
# 
# The function specified must not require any arguments.
# If using the IP address as the hostname is preferred, use the value ``airflow.utils.net.get_host_ip_address``.
#
# Variable: AIRFLOW__CORE__HOSTNAME_CALLABLE
#
hostname_callable = airflow.utils.net.getfqdn
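
# For example, a minimal custom callable (the module ``my_netutils`` and its
# presence on the PYTHONPATH are assumptions for illustration):
#
#   def static_hostname() -> str:
#       return "worker-1.example.internal"
#
# configured as: hostname_callable = my_netutils.static_hostname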

# A callable to check whether a python file has airflow dags defined or not,
# with the signature ``(file_path: str, zip_file: zipfile.ZipFile | None = None)``,
# returning True if it has dags and False otherwise.
# If this is not provided, Airflow uses its own heuristic rules.
#
# Variable: AIRFLOW__CORE__MIGHT_CONTAIN_DAG_CALLABLE
#
might_contain_dag_callable = airflow.utils.file.might_contain_dag_via_default_heuristic
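
# A minimal sketch of a custom callable (the module ``my_dag_utils`` is a
# hypothetical name; it must be importable on the PYTHONPATH):
#
#   import zipfile
#
#   def might_contain_dag(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool:
#       # Cheap filter: only files whose path mentions "dags" are parsed further.
#       return "dags" in file_path
#
# configured as: might_contain_dag_callable = my_dag_utils.might_contain_dag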

# Default timezone in case supplied date times are naive.
# Can be ``utc`` (default), ``system``, or any IANA timezone string (e.g. ``Europe/Amsterdam``).
#
# Variable: AIRFLOW__CORE__DEFAULT_TIMEZONE
#
default_timezone = utc

# The executor class that airflow should use. Choices include
# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``,
# ``KubernetesExecutor``, ``CeleryKubernetesExecutor``, ``LocalKubernetesExecutor`` or the
# full import path to the class when using a custom executor.
#
# Variable: AIRFLOW__CORE__EXECUTOR
#
executor = SequentialExecutor
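
# Example (a hypothetical custom executor, referenced by its full import path):
# executor = my_company.executors.MyCustomExecutor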

# The auth manager class that airflow should use. Full import path to the auth manager class.
#
# Variable: AIRFLOW__CORE__AUTH_MANAGER
#
auth_manager = airflow.auth.managers.fab.fab_auth_manager.FabAuthManager

# This defines the maximum number of task instances that can run concurrently per scheduler in
# Airflow, regardless of the worker count. Generally this value, multiplied by the number of
# schedulers in your cluster, is the maximum number of task instances with the running
# state in the metadata database.
#
# Variable: AIRFLOW__CORE__PARALLELISM
#
parallelism = 32

# The maximum number of task instances allowed to run concurrently in each DAG. To calculate
# the number of tasks that are running concurrently for a DAG, add up the number of running
# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``max_active_tasks``,
# which defaults to ``max_active_tasks_per_dag``.
# 
# An example scenario when this would be useful is when you want to stop a new dag with an early
# start date from stealing all the executor slots in a cluster.
#
# Variable: AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG
#
max_active_tasks_per_dag = 16
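
# For reference, the equivalent DAG-level override in a DAG file looks like this
# (a sketch; the dag id and the limit are illustrative):
#
#   from airflow import DAG
#
#   with DAG(dag_id="my_dag", max_active_tasks=4):
#       ...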

# Whether DAGs are paused by default at creation.
#
# Variable: AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION
#
dags_are_paused_at_creation = True

# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs
# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``,
# which defaults to ``max_active_runs_per_dag``.
#
# Variable: AIRFLOW__CORE__MAX_ACTIVE_RUNS_PER_DAG
#
max_active_runs_per_dag = 16

# The name of the method used in order to start Python processes via the multiprocessing module.
# This corresponds directly with the options available in the Python docs:
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method.
# Must be one of the values returned by:
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_all_start_methods.
#
# Example: mp_start_method = fork
#
# Variable: AIRFLOW__CORE__MP_START_METHOD
#
# mp_start_method = 

# Whether to load the DAG examples that ship with Airflow. They are useful for
# getting started, but you probably want to set this to ``False`` in a production
# environment.
#
# Variable: AIRFLOW__CORE__LOAD_EXAMPLES
#
load_examples = True

# Path to the folder containing Airflow plugins
#
# Variable: AIRFLOW__CORE__PLUGINS_FOLDER
#
plugins_folder = /opt/airflow/plugins

# Should tasks be executed by forking the parent process ("False",
# the speedier option) or by spawning a new python process ("True": slower,
# but plugin changes are picked up by tasks straight away).
#
# Variable: AIRFLOW__CORE__EXECUTE_TASKS_NEW_PYTHON_INTERPRETER
#
execute_tasks_new_python_interpreter = False

# Secret key used to encrypt connection passwords in the db
#
# Variable: AIRFLOW__CORE__FERNET_KEY
#
fernet_key = 

# Whether to disable pickling dags
#
# Variable: AIRFLOW__CORE__DONOT_PICKLE
#
donot_pickle = True

# How long before timing out a python file import
#
# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_TIMEOUT
#
dagbag_import_timeout = 30.0

# Should a traceback be shown in the UI for dagbag import errors,
# instead of just the exception message
#
# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_ERROR_TRACEBACKS
#
dagbag_import_error_tracebacks = True

# If tracebacks are shown, how many entries from the traceback should be shown
#
# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_ERROR_TRACEBACK_DEPTH
#
dagbag_import_error_traceback_depth = 2

# How long before timing out a DagFileProcessor, which processes a dag file
#
# Variable: AIRFLOW__CORE__DAG_FILE_PROCESSOR_TIMEOUT
#
dag_file_processor_timeout = 50

# The class to use for running task instances in a subprocess.
# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class
# when using a custom task runner.
#
# Variable: AIRFLOW__CORE__TASK_RUNNER
#
task_runner = StandardTaskRunner

# If set, tasks without a ``run_as_user`` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
#
# Variable: AIRFLOW__CORE__DEFAULT_IMPERSONATION
#
default_impersonation = 

# What security module to use (for example kerberos)
#
# Variable: AIRFLOW__CORE__SECURITY
#
security = 

# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
#
# Variable: AIRFLOW__CORE__UNIT_TEST_MODE
#
unit_test_mode = False

# Whether to enable pickling for xcom (note that this is insecure and allows for
# RCE exploits).
#
# Variable: AIRFLOW__CORE__ENABLE_XCOM_PICKLING
#
enable_xcom_pickling = False

# What classes can be imported during deserialization. This is a multi line value.
# The individual items will be parsed as regexp. Python built-in classes (like dict)
# are always allowed. A bare "." will be escaped automatically, so you can simply set ``airflow.*``.
#
# Variable: AIRFLOW__CORE__ALLOWED_DESERIALIZATION_CLASSES
#
allowed_deserialization_classes = airflow\..*
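
# For example, to additionally allow classes from a hypothetical internal package
# (assuming items are whitespace-separated and each one is parsed as a regexp):
# allowed_deserialization_classes = airflow\..* my_company\..*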

# When a task is killed forcefully, this is the amount of time in seconds that
# it has to clean up after it is sent a SIGTERM, before it is SIGKILLed.
#
# Variable: AIRFLOW__CORE__KILLED_TASK_CLEANUP_TIME
#
killed_task_cleanup_time = 60

# Whether to override params with dag_run.conf. If you pass some key-value pairs
# through ``airflow dags backfill -c`` or
# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params.
#
# Variable: AIRFLOW__CORE__DAG_RUN_CONF_OVERRIDES_PARAMS
#
dag_run_conf_overrides_params = True

# If enabled, Airflow will only scan files containing both ``DAG`` and ``airflow`` (case-insensitive).
#
# Variable: AIRFLOW__CORE__DAG_DISCOVERY_SAFE_MODE
#
dag_discovery_safe_mode = True

# The pattern syntax used in the ".airflowignore" files in the DAG directories. Valid values are
# ``regexp`` or ``glob``.
#
# Variable: AIRFLOW__CORE__DAG_IGNORE_FILE_SYNTAX
#
dag_ignore_file_syntax = regexp

# The number of retries each task is going to have by default. Can be overridden at dag or task level.
#
# Variable: AIRFLOW__CORE__DEFAULT_TASK_RETRIES
#
default_task_retries = 0

# The number of seconds each task is going to wait by default between retries. Can be overridden at
# dag or task level.
#
# Variable: AIRFLOW__CORE__DEFAULT_TASK_RETRY_DELAY
#
default_task_retry_delay = 300

# The maximum delay (in seconds) each task is going to wait by default between retries.
# This is a global setting and cannot be overridden at task or DAG level.
#
# Variable: AIRFLOW__CORE__MAX_TASK_RETRY_DELAY
#
max_task_retry_delay = 86400

# The weighting method used for the effective total priority weight of the task.
# Valid values are: ``downstream``, ``upstream``, ``absolute``.
#
# Variable: AIRFLOW__CORE__DEFAULT_TASK_WEIGHT_RULE
#
default_task_weight_rule = downstream

# The default task execution_timeout value for the operators. An integer value is
# expected, which is passed into timedelta as seconds. If not specified, the value is treated
# as None, meaning that the operators are never timed out by default.
#
# Variable: AIRFLOW__CORE__DEFAULT_TASK_EXECUTION_TIMEOUT
#
default_task_execution_timeout = 
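
# Example (time out tasks after one hour by default; the value is illustrative):
# default_task_execution_timeout = 3600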

# Serialized DAGs are updated no more often than this interval (in seconds), to reduce the database write rate.
#
# Variable: AIRFLOW__CORE__MIN_SERIALIZED_DAG_UPDATE_INTERVAL
#
min_serialized_dag_update_interval = 30

# If True, serialized DAGs are compressed before writing to DB.
# Note: this will disable the DAG dependencies view
#
# Variable: AIRFLOW__CORE__COMPRESS_SERIALIZED_DAGS
#
compress_serialized_dags = False

# Serialized DAGs are fetched no more often than this interval (in seconds), to reduce the database
# read rate. This config controls when your DAGs are updated in the Webserver.
#
# Variable: AIRFLOW__CORE__MIN_SERIALIZED_DAG_FETCH_INTERVAL
#
min_serialized_dag_fetch_interval = 10

# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store
# in the Database.
# All the template_fields for each Task Instance are stored in the Database.
# Keeping this number small may cause an error when you try to view the ``Rendered`` tab in
# the TaskInstance view for older tasks.
#
# Variable: AIRFLOW__CORE__MAX_NUM_RENDERED_TI_FIELDS_PER_TASK
#
max_num_rendered_ti_fields_per_task = 30

# On each dagrun, check against defined SLAs.
#
# Variable: AIRFLOW__CORE__CHECK_SLAS
#
check_slas = True

# Path to a custom XCom class that will be used to store and resolve operator results
#
# Example: xcom_backend = path.to.CustomXCom
#
# Variable: AIRFLOW__CORE__XCOM_BACKEND
#
xcom_backend = airflow.models.xcom.BaseXCom
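
# A minimal sketch of a custom backend (the module and class names are hypothetical;
# see BaseXCom for the full method signatures):
#
#   from airflow.models.xcom import BaseXCom
#
#   class CustomXCom(BaseXCom):
#       @staticmethod
#       def serialize_value(value, **kwargs):
#           ...  # e.g. stash large values in object storage and return a reference
#
# configured as: xcom_backend = my_plugin.custom_xcom.CustomXCom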

# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``
# if you want to load plugins whenever 'airflow' is invoked via the cli or loaded from a module.
#
# Variable: AIRFLOW__CORE__LAZY_LOAD_PLUGINS
#
lazy_load_plugins = True

# By default Airflow providers are lazily-discovered (discovery and imports happen only when required).
# Set it to False if you want to discover providers whenever 'airflow' is invoked via the cli or
# loaded from a module.
#
# Variable: AIRFLOW__CORE__LAZY_DISCOVER_PROVIDERS
#
lazy_discover_providers = True

# Hide sensitive Variables or Connection extra JSON keys from the UI and task logs when set to True
# 
# (Connection passwords are always hidden in logs)
#
# Variable: AIRFLOW__CORE__HIDE_SENSITIVE_VAR_CONN_FIELDS
#
hide_sensitive_var_conn_fields = True

# A comma-separated list of extra sensitive keywords to look for in variable names or a connection's
# extra JSON.
#
# Variable: AIRFLOW__CORE__SENSITIVE_VAR_CONN_NAMES
#
sensitive_var_conn_names = 
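
# Example (the keywords are illustrative):
# sensitive_var_conn_names = token,client_secret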

# Task Slot counts for ``default_pool``. This setting would not have any effect in an existing
# deployment where the ``default_pool`` is already created. For existing deployments, users can
# change the number of slots using the Webserver, the API or the CLI.
#
# Variable: AIRFLOW__CORE__DEFAULT_POOL_TASK_SLOT_COUNT
#
default_pool_task_slot_count = 128

# The maximum list/dict length an XCom can push to trigger task mapping. If the pushed list/dict has a
# length exceeding this value, the task pushing the XCom will be failed automatically to prevent the
# mapped tasks from clogging the scheduler.
#
# Variable: AIRFLOW__CORE__MAX_MAP_LENGTH
#
max_map_length = 1024

# The default umask to use for processes run in daemon mode (scheduler, worker, etc.)
# 
# This controls the file-creation mode mask which determines the initial value of file permission bits
# for newly created files.
# 
# This value is treated as an octal-integer.
#
# Variable: AIRFLOW__CORE__DAEMON_UMASK
#
daemon_umask = 0o077
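
# For example, with the default umask 0o077, a file created with mode 0o666
# ends up as 0o666 & ~0o077 = 0o600 (readable and writable by the owner only).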

# Class to use as dataset manager.
#
# Example: dataset_manager_class = airflow.datasets.manager.DatasetManager
#
# Variable: AIRFLOW__CORE__DATASET_MANAGER_CLASS
#
# dataset_manager_class = 

# Kwargs to supply to dataset manager.
#
# Example: dataset_manager_kwargs = {"some_param": "some_value"}
#
# Variable: AIRFLOW__CORE__DATASET_MANAGER_KWARGS
#
# dataset_manager_kwargs = 

# (experimental) Whether components should use Airflow Internal API for DB connectivity.
#
# Variable: AIRFLOW__CORE__DATABASE_ACCESS_ISOLATION
#
database_access_isolation = False

# (experimental) Airflow Internal API url. Only used if [core] database_access_isolation is True.
#
# Example: internal_api_url = http://localhost:8080
#
# Variable: AIRFLOW__CORE__INTERNAL_API_URL
#
# internal_api_url = 

# Controls the ability to test connections from the Airflow UI, API and CLI.
# Supported options: Disabled, Enabled, Hidden. Default: Disabled.
# Disabled - Disables the test connection functionality and disables the Test Connection button in UI.
# Enabled - Enables the test connection functionality and shows the Test Connection button in UI.
# Hidden - Disables the test connection functionality and hides the Test Connection button in UI.
# Before setting this to Enabled, make sure that you review the users who are able to add/edit
# connections and ensure they are trusted. Connection testing can be done maliciously leading to
# undesired and insecure outcomes. For more information on capabilities of users, see the documentation:
# https://airflow.apache.org/docs/apache-airflow/stable/security/security_model.html#capabilities-of-authenticated-ui-users
#
# Variable: AIRFLOW__CORE__TEST_CONNECTION
#
test_connection = Disabled

[database]
# Path to the ``alembic.ini`` file. You can either provide the file path relative
# to the Airflow home directory or the absolute path if it is located elsewhere.
#
# Variable: AIRFLOW__DATABASE__ALEMBIC_INI_FILE_PATH
#
alembic_ini_file_path = alembic.ini

# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines.
# More information here:
# http://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html#database-uri
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONN
#
sql_alchemy_conn = sqlite:////opt/airflow/airflow.db

# Extra engine specific keyword args passed to SQLAlchemy's create_engine, as a JSON-encoded value
#
# Example: sql_alchemy_engine_args = {"arg1": True}
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_ENGINE_ARGS
#
# sql_alchemy_engine_args = 

# The encoding for the databases
#
# Variable: AIRFLOW__DATABASE__SQL_ENGINE_ENCODING
#
sql_engine_encoding = utf-8

# Collation for ``dag_id``, ``task_id``, ``key``, ``external_executor_id`` columns
# in case they have different encoding.
# By default this collation is the same as the database collation, however for ``mysql`` and ``mariadb``
# the default is ``utf8mb3_bin`` so that the index sizes of our index keys will not exceed
# the maximum size of allowed index when collation is set to ``utf8mb4`` variant
# (see https://github.com/apache/airflow/pull/17603#issuecomment-901121618).
#
# Variable: AIRFLOW__DATABASE__SQL_ENGINE_COLLATION_FOR_IDS
#
# sql_engine_collation_for_ids = 

# If SqlAlchemy should pool database connections.
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_ENABLED
#
sql_alchemy_pool_enabled = True

# The SqlAlchemy pool size is the maximum number of database connections
# in the pool. 0 indicates no limit.
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_SIZE
#
sql_alchemy_pool_size = 5

# The maximum overflow size of the pool.
# When the number of checked-out connections reaches the size set in pool_size,
# additional connections will be returned up to this limit.
# When those additional connections are returned to the pool, they are disconnected and discarded.
# It follows then that the total number of simultaneous connections the pool will allow
# is pool_size + max_overflow,
# and the total number of "sleeping" connections the pool will allow is pool_size.
# max_overflow can be set to ``-1`` to indicate no overflow limit;
# no limit will be placed on the total number of concurrent connections. Defaults to ``10``.
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_OVERFLOW
#
sql_alchemy_max_overflow = 10

# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite. If the number of DB connections is ever exceeded,
# a lower config value will allow the system to recover faster.
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_RECYCLE
#
sql_alchemy_pool_recycle = 1800

# Check connection at the start of each connection pool checkout.
# Typically, this is a simple statement like "SELECT 1".
# More information here:
# https://docs.sqlalchemy.org/en/14/core/pooling.html#disconnect-handling-pessimistic
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_PRE_PING
#
sql_alchemy_pool_pre_ping = True

# The schema to use for the metadata database.
# SqlAlchemy supports databases with the concept of multiple schemas.
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_SCHEMA
#
sql_alchemy_schema = 

# Import path for connect args in SqlAlchemy. Defaults to an empty dict.
# This is useful when you want to configure db engine args that SqlAlchemy won't parse
# in the connection string.
# See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.connect_args
#
# Example: sql_alchemy_connect_args = {"timeout": 30}
#
# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONNECT_ARGS
#
# sql_alchemy_connect_args = 

# Whether to load the default connections that ship with Airflow when ``airflow db init`` is called.
# It's good to get started, but you probably want to set this to ``False`` in a production environment.
#
# Variable: AIRFLOW__DATABASE__LOAD_DEFAULT_CONNECTIONS
#
load_default_connections = True

# Number of times the code should be retried in case of DB Operational Errors.
# Not all transactions will be retried as it can cause undesired state.
# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``.
#
# Variable: AIRFLOW__DATABASE__MAX_DB_RETRIES
#
max_db_retries = 3

# Whether to run alembic migrations during Airflow start up. Sometimes this operation can be expensive,
# and the users can assert the correct version through other means (e.g. through a Helm chart).
# Accepts "True" or "False".
#
# Variable: AIRFLOW__DATABASE__CHECK_MIGRATIONS
#
check_migrations = True

[logging]
# The folder where airflow should store its log files.
# This path must be absolute.
# There are a few existing configurations that assume this is set to the default.
# If you choose to override this you may need to update the dag_processor_manager_log_location and
# child_process_log_directory settings as well.
#
# Variable: AIRFLOW__LOGGING__BASE_LOG_FOLDER
#
base_log_folder = /opt/airflow/logs

# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elasticsearch.
# Set this to True if you want to enable remote logging.
#
# Variable: AIRFLOW__LOGGING__REMOTE_LOGGING
#
remote_logging = False

# Users must supply an Airflow connection id that provides access to the storage
# location. Depending on your remote logging service, this may only be used for
# reading logs, not writing them.
#
# Variable: AIRFLOW__LOGGING__REMOTE_LOG_CONN_ID
#
remote_log_conn_id = 

# Whether the local log files for GCS, S3, WASB and OSS remote logging should be deleted after
# they are uploaded to the remote location.
#
# Variable: AIRFLOW__LOGGING__DELETE_LOCAL_LOGS
#
delete_local_logs = False

# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default
# Credentials
# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
# be used.
#
# Variable: AIRFLOW__LOGGING__GOOGLE_KEY_PATH
#
google_key_path = 

# Storage bucket URL for remote logging
# S3 buckets should start with "s3://"
# Cloudwatch log groups should start with "cloudwatch://"
# GCS buckets should start with "gs://"
# WASB buckets should start with "wasb" just to help Airflow select the correct handler
# Stackdriver logs should start with "stackdriver://"
#
# Variable: AIRFLOW__LOGGING__REMOTE_BASE_LOG_FOLDER
#
remote_base_log_folder = 

# The remote_task_handler_kwargs param is loaded into a dictionary and passed to the __init__ of the
# remote task handler, and it overrides the values provided by the Airflow config. For example, if you
# set `delete_local_logs=False` and you provide ``{"delete_local_copy": true}``, then the local
# log files will be deleted after they are uploaded to the remote location.
#
# Example: remote_task_handler_kwargs = {"delete_local_copy": true}
#
# Variable: AIRFLOW__LOGGING__REMOTE_TASK_HANDLER_KWARGS
#
remote_task_handler_kwargs = 

# Use server-side encryption for logs stored in S3
#
# Variable: AIRFLOW__LOGGING__ENCRYPT_S3_LOGS
#
encrypt_s3_logs = False

# Logging level.
# 
# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
#
# Variable: AIRFLOW__LOGGING__LOGGING_LEVEL
#
logging_level = INFO

# Logging level for celery. If not set, it uses the value of logging_level
# 
# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
#
# Variable: AIRFLOW__LOGGING__CELERY_LOGGING_LEVEL
#
celery_logging_level = 

# Logging level for Flask-appbuilder UI.
# 
# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
#
# Variable: AIRFLOW__LOGGING__FAB_LOGGING_LEVEL
#
fab_logging_level = WARNING

# Logging class.
# Specify the class that defines the logging configuration.
# This class has to be on the python classpath.
#
# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
#
# Variable: AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS
#
logging_config_class = 

# Flag to enable/disable Colored logs in Console
# Colour the logs when the controlling terminal is a TTY.
#
# Variable: AIRFLOW__LOGGING__COLORED_CONSOLE_LOG
#
colored_console_log = True

# Log format for when Colored logs is enabled
#
# Variable: AIRFLOW__LOGGING__COLORED_LOG_FORMAT
#
colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s

#
# Variable: AIRFLOW__LOGGING__COLORED_FORMATTER_CLASS
#
colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter

# Format of Log line
#
# Variable: AIRFLOW__LOGGING__LOG_FORMAT
#
log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s

#
# Variable: AIRFLOW__LOGGING__SIMPLE_LOG_FORMAT
#
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s

# Where to send dag parser logs. If "file", logs are sent to log files defined by child_process_log_directory.
#
# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_LOG_TARGET
#
dag_processor_log_target = file

# Format of Dag Processor Log line
#
# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_LOG_FORMAT
#
dag_processor_log_format = [%%(asctime)s] [SOURCE:DAG_PROCESSOR] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s

#
# Variable: AIRFLOW__LOGGING__LOG_FORMATTER_CLASS
#
log_formatter_class = airflow.utils.log.timezone_aware.TimezoneAware

# An import path to a function to add adaptations of each secret added with
# `airflow.utils.log.secrets_masker.mask_secret` to be masked in log messages. The given function
# is expected to require a single parameter: the secret to be adapted. It may return a
# single adaptation of the secret or an iterable of adaptations to each be masked as secrets.
# The original secret will be masked as well as any adaptations returned.
#
# Example: secret_mask_adapter = urllib.parse.quote
#
# Variable: AIRFLOW__LOGGING__SECRET_MASK_ADAPTER
#
secret_mask_adapter = 

# Specify a prefix pattern, as in the example below, when using the stream handler TaskHandlerWithCustomFormatter
#
# Example: task_log_prefix_template = {ti.dag_id}-{ti.task_id}-{execution_date}-{try_number}
#
# Variable: AIRFLOW__LOGGING__TASK_LOG_PREFIX_TEMPLATE
#
task_log_prefix_template = 

# Formatting for how airflow generates file names/paths for each task run.
#
# Variable: AIRFLOW__LOGGING__LOG_FILENAME_TEMPLATE
#
log_filename_template = dag_id={{ ti.dag_id }}/run_id={{ ti.run_id }}/task_id={{ ti.task_id }}/{%% if ti.map_index >= 0 %%}map_index={{ ti.map_index }}/{%% endif %%}attempt={{ try_number }}.log
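
# With the template above, attempt 1 of an unmapped task renders to a path like
# the following (the dag/run/task ids are illustrative):
# dag_id=my_dag/run_id=manual__2024-01-01T00:00:00+00:00/task_id=my_task/attempt=1.log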

# Formatting for how airflow generates file names for dag processor logs
#
# Variable: AIRFLOW__LOGGING__LOG_PROCESSOR_FILENAME_TEMPLATE
#
log_processor_filename_template = {{ filename }}.log

# Full path of dag_processor_manager logfile.
#
# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_MANAGER_LOG_LOCATION
#
dag_processor_manager_log_location = /opt/airflow/logs/dag_processor_manager/dag_processor_manager.log

# Name of handler to read task instance logs.
# Defaults to use ``task`` handler.
#
# Variable: AIRFLOW__LOGGING__TASK_LOG_READER
#
task_log_reader = task

# A comma-separated list of third-party logger names that will be configured to print messages to
# consoles.
#
# Example: extra_logger_names = connexion,sqlalchemy
#
# Variable: AIRFLOW__LOGGING__EXTRA_LOGGER_NAMES
#
extra_logger_names = 

# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the main airflow
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and must be
# reachable from the main web server so it can connect to the workers.
#
# Variable: AIRFLOW__LOGGING__WORKER_LOG_SERVER_PORT
#
worker_log_server_port = 8793

# Port to serve logs from for the triggerer. See the worker_log_server_port description
# for more info.
#
# Variable: AIRFLOW__LOGGING__TRIGGER_LOG_SERVER_PORT
#
trigger_log_server_port = 8794

# To interleave logs between the trigger and the task, we must parse timestamps
# in the log files. In case your log format is non-standard,
# you may provide an import path to a callable which takes a string log line and returns
# the timestamp (datetime.datetime compatible).
#
# Example: interleave_timestamp_parser = path.to.my_func
#
# Variable: AIRFLOW__LOGGING__INTERLEAVE_TIMESTAMP_PARSER
#
# interleave_timestamp_parser = 

# Permissions in the form of an octal string, as understood by chmod. The permissions are important
# when you use impersonation, when logs are written by a different user than airflow. The most secure
# way of configuring it in this case is to add both users to the same group and make it the default
# group of both users. Group-writeable logs are the default in airflow, but you might decide that you are
# OK with having the logs other-writeable, in which case you should set it to `0o777`. You might
# decide to add more security if you do not use impersonation and change it to `0o755` to make it
# only owner-writeable. You can also make it readable only by the owner by changing it to `0o700` if
# all the access (read/write) for your logs happens from the same user.
#
# Example: file_task_handler_new_folder_permissions = 0o775
#
# Variable: AIRFLOW__LOGGING__FILE_TASK_HANDLER_NEW_FOLDER_PERMISSIONS
#
file_task_handler_new_folder_permissions = 0o775

# Permissions in the form of an octal string, as understood by chmod. The permissions are important
# when you use impersonation, when logs are written by a different user than airflow. The most secure
# way of configuring it in this case is to add both users to the same group and make it the default
# group of both users. Group-writeable logs are the default in airflow, but you might decide that you are
# OK with having the logs other-writeable, in which case you should set it to `0o666`. You might
# decide to add more security if you do not use impersonation and change it to `0o644` to make it
# only owner-writeable. You can also make it readable only by the owner by changing it to `0o600` if
# all the access (read/write) for your logs happens from the same user.
#
# Example: file_task_handler_new_file_permissions = 0o664
#
# Variable: AIRFLOW__LOGGING__FILE_TASK_HANDLER_NEW_FILE_PERMISSIONS
#
file_task_handler_new_file_permissions = 0o664

# By default Celery sends all logs to stderr.
# If enabled, any previous logging handlers will get *removed*.
# With this option Airflow will create new handlers
# and send lower-severity logs like INFO and WARNING to stdout,
# while sending higher-severity logs to stderr.
#
# Variable: AIRFLOW__LOGGING__CELERY_STDOUT_STDERR_SEPARATION
#
celery_stdout_stderr_separation = False

[metrics]
# StatsD (https://github.com/etsy/statsd) integration settings.

# If you want to avoid emitting all the available metrics, you can configure an
# allow list of prefixes (comma separated) to send only the metrics that start
# with the elements of the list (e.g. "scheduler,executor,dagrun")
#
# Variable: AIRFLOW__METRICS__METRICS_ALLOW_LIST
#
metrics_allow_list = 

# If you want to avoid emitting all the available metrics, you can configure a
# block list of prefixes (comma separated) to filter out metrics that start with
# the elements of the list (e.g. "scheduler,executor,dagrun").
# If metrics_allow_list and metrics_block_list are both configured, metrics_block_list is ignored.
#
# Variable: AIRFLOW__METRICS__METRICS_BLOCK_LIST
#
metrics_block_list = 

# Enables sending metrics to StatsD.
#
# Variable: AIRFLOW__METRICS__STATSD_ON
#
statsd_on = False

#
# Variable: AIRFLOW__METRICS__STATSD_HOST
#
statsd_host = localhost

#
# Variable: AIRFLOW__METRICS__STATSD_PORT
#
statsd_port = 8125

#
# Variable: AIRFLOW__METRICS__STATSD_PREFIX
#
statsd_prefix = airflow

# A function that validates the StatsD stat name, applies changes to the stat name if necessary,
# and returns the transformed stat name.
# 
# The function should have the following signature:
# def func_name(stat_name: str) -> str:
#
# Variable: AIRFLOW__METRICS__STAT_NAME_HANDLER
#
stat_name_handler = 
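
# A minimal sketch matching that signature (the module and function names are hypothetical):
#
#   def my_stat_name_handler(stat_name: str) -> str:
#       # e.g. replace characters your metrics backend cannot ingest
#       return stat_name.replace(".", "_")
#
# configured as: stat_name_handler = my_metrics.my_stat_name_handler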

# Enables the datadog integration to send airflow metrics.
#
# Variable: AIRFLOW__METRICS__STATSD_DATADOG_ENABLED
#
statsd_datadog_enabled = False

# List of datadog tags attached to all metrics (e.g. key1:value1,key2:value2)
#
# Variable: AIRFLOW__METRICS__STATSD_DATADOG_TAGS
#
statsd_datadog_tags = 

# Set to False to disable metadata tags for some of the emitted metrics
#
# Variable: AIRFLOW__METRICS__STATSD_DATADOG_METRICS_TAGS
#
statsd_datadog_metrics_tags = True

# If you want to utilise your own custom StatsD client set the relevant
# module path below.
# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up
#
# Variable: AIRFLOW__METRICS__STATSD_CUSTOM_CLIENT_PATH
#
# statsd_custom_client_path = 

# If you want to avoid sending all the available metrics tags to StatsD,
# you can configure a block list of prefixes (comma separated) to filter out metric tags
# that start with the elements of the list (e.g. "job_id,run_id")
#
# Example: statsd_disabled_tags = job_id,run_id,dag_id,task_id
#
# Variable: AIRFLOW__METRICS__STATSD_DISABLED_TAGS
#
statsd_disabled_tags = job_id,run_id

# To enable sending Airflow metrics with the StatsD-InfluxDB tagging convention.
#
# Variable: AIRFLOW__METRICS__STATSD_INFLUXDB_ENABLED
#
statsd_influxdb_enabled = False

# Enables sending metrics to OpenTelemetry.
#
# Variable: AIRFLOW__METRICS__OTEL_ON
#
otel_on = False

#
# Variable: AIRFLOW__METRICS__OTEL_HOST
#
otel_host = localhost

#
# Variable: AIRFLOW__METRICS__OTEL_PORT
#
otel_port = 8889

#
# Variable: AIRFLOW__METRICS__OTEL_PREFIX
#
otel_prefix = airflow

#
# Variable: AIRFLOW__METRICS__OTEL_INTERVAL_MILLISECONDS
#
otel_interval_milliseconds = 60000

# If True, all metrics are also emitted to the console. Defaults to False.
#
# Variable: AIRFLOW__METRICS__OTEL_DEBUGGING_ON
#
otel_debugging_on = False

# If True, SSL will be enabled. Defaults to False.
# To establish an HTTPS connection to the OpenTelemetry collector,
# you need to configure the SSL certificate and key within the OpenTelemetry collector's
# config.yml file.
#
# Variable: AIRFLOW__METRICS__OTEL_SSL_ACTIVE
#
otel_ssl_active = False

[secrets]
# Full class name of secrets backend to enable (will precede env vars and metastore in search path)
#
# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
#
# Variable: AIRFLOW__SECRETS__BACKEND
#
backend = 

# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class.
# See documentation for the secrets backend you are using. JSON is expected.
# Example for AWS Systems Manager ParameterStore:
# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}``
#
# Variable: AIRFLOW__SECRETS__BACKEND_KWARGS
#
backend_kwargs = 

# .. note:: |experimental|
# 
# Enables local caching of Variables, when parsing DAGs only.
# Using this option can make dag parsing faster if Variables are used in top level code, at the expense
# of longer propagation time for changes.
# Please note that this cache concerns only the DAG parsing step. There is no caching in place when DAG
# tasks are run.
#
# Variable: AIRFLOW__SECRETS__USE_CACHE
#
use_cache = False

# .. note:: |experimental|
# 
# When the cache is enabled, this is the duration for which we consider an entry in the cache to be
# valid. Entries are refreshed if they are older than this many seconds.
# It means that when the cache is enabled, this is the maximum amount of time you need to wait to see a
# Variable change take effect.
#
# Variable: AIRFLOW__SECRETS__CACHE_TTL_SECONDS
#
cache_ttl_seconds = 900

[cli]
# How the cli should access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
# webserver.
#
# Variable: AIRFLOW__CLI__API_CLIENT
#
api_client = airflow.api.client.local_client

# If you set web_server_url_prefix, do NOT forget to append it here, ex:
# ``endpoint_url = http://localhost:8080/myroot``
# So the api will look like: ``http://localhost:8080/myroot/api/experimental/...``
#
# Variable: AIRFLOW__CLI__ENDPOINT_URL
#
endpoint_url = http://localhost:8080

[debug]
# Used only with ``DebugExecutor``. If set to ``True``, the DAG will fail at the first
# failed task. Helpful for debugging purposes.
#
# Variable: AIRFLOW__DEBUG__FAIL_FAST
#
fail_fast = False

[api]
# Enables the deprecated experimental API. Please note that these APIs do not have access control.
# The authenticated user has full access.
# 
# .. warning::
# 
#   This `Experimental REST API <https://airflow.readthedocs.io/en/latest/rest-api-ref.html>`__ is
#   deprecated since version 2.0. Please consider using
#   `the Stable REST API <https://airflow.readthedocs.io/en/latest/stable-rest-api-ref.html>`__.
#   For more information on migration, see
#   `RELEASE_NOTES.rst <https://github.com/apache/airflow/blob/main/RELEASE_NOTES.rst>`_
#
# Variable: AIRFLOW__API__ENABLE_EXPERIMENTAL_API
#
enable_experimental_api = False

# Comma separated list of auth backends to authenticate users of the API. See
# https://airflow.apache.org/docs/apache-airflow/stable/security/api.html for possible values.
# ("airflow.api.auth.backend.default" allows all requests for historic reasons)
#
# Variable: AIRFLOW__API__AUTH_BACKENDS
#
auth_backends = airflow.api.auth.backend.session
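
# Example (session auth for the UI plus basic auth for scripted API clients):
# auth_backends = airflow.api.auth.backend.basic_auth,airflow.api.auth.backend.session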

# Used to set the maximum page limit for API requests. If the limit passed as a param
# is greater than the maximum page limit, it will be ignored and the maximum page limit value
# will be used as the limit.
#
# Variable: AIRFLOW__API__MAXIMUM_PAGE_LIMIT
#
maximum_page_limit = 100

# Used to set the default page limit when the limit param is zero or not provided in API
# requests. Otherwise, if a positive integer is passed as the limit in an API request, the
# smaller of the user-given limit and the maximum page limit is used as the limit.
#
# Variable: AIRFLOW__API__FALLBACK_PAGE_LIMIT
#
fallback_page_limit = 100

# The intended audience for JWT token credentials used for authorization. This value must
# match on the client and server sides. If empty, the audience will not be tested.
#
# Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com
#
# Variable: AIRFLOW__API__GOOGLE_OAUTH2_AUDIENCE
#
google_oauth2_audience = 

# Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on
# `the Application Default Credentials
# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
# be used.
#
# Example: google_key_path = /files/service-account-json
#
# Variable: AIRFLOW__API__GOOGLE_KEY_PATH
#
google_key_path = 

# Used in response to a preflight request to indicate which HTTP
# headers can be used when making the actual request. This header is
# the server side response to the browser's
# Access-Control-Request-Headers header.
#
# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_HEADERS
#
access_control_allow_headers = 

# Specifies the method or methods allowed when accessing the resource.
#
# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_METHODS
#
access_control_allow_methods = 

# Indicates whether the response can be shared with requesting code from the given origins.
# Separate URLs with space.
#
# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_ORIGINS
#
access_control_allow_origins = 
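
# Example (two origins, separated by a space; the URLs are illustrative):
# access_control_allow_origins = https://ui.example.com https://reports.example.com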

# Indicates whether the *xcomEntries* endpoint supports the *deserialize*
# flag. If set to False, setting this flag in a request would result in a
# 400 Bad Request error.
#
# Variable: AIRFLOW__API__ENABLE_XCOM_DESERIALIZE_SUPPORT
#
enable_xcom_deserialize_support = False

[lineage]
# What lineage backend to use
#
# Variable: AIRFLOW__LINEAGE__BACKEND
#
backend = 

[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via ``default_args``
#
# Variable: AIRFLOW__OPERATORS__DEFAULT_OWNER
#
default_owner = airflow

# The default value of attribute "deferrable" in operators and sensors.
#
# Variable: AIRFLOW__OPERATORS__DEFAULT_DEFERRABLE
#
default_deferrable = false

#
# Variable: AIRFLOW__OPERATORS__DEFAULT_CPUS
#
default_cpus = 1

#
# Variable: AIRFLOW__OPERATORS__DEFAULT_RAM
#
default_ram = 512

#
# Variable: AIRFLOW__OPERATORS__DEFAULT_DISK
#
default_disk = 512

#
# Variable: AIRFLOW__OPERATORS__DEFAULT_GPUS
#
default_gpus = 0

# Default queue that tasks get assigned to and that workers listen on.
#
# Variable: AIRFLOW__OPERATORS__DEFAULT_QUEUE
#
default_queue = default

# Whether it is allowed to pass additional/unused arguments (args, kwargs) to the BaseOperator.
# If set to False, an exception will be thrown; otherwise only a console message will be displayed.
#
# Variable: AIRFLOW__OPERATORS__ALLOW_ILLEGAL_ARGUMENTS
#
allow_illegal_arguments = False

[webserver]
# The message displayed when a user attempts to execute actions beyond their authorised privileges.
#
# Variable: AIRFLOW__WEBSERVER__ACCESS_DENIED_MESSAGE
#
access_denied_message = Access is Denied

# Path of webserver config file used for configuring the webserver parameters
#
# Variable: AIRFLOW__WEBSERVER__CONFIG_FILE
#
config_file = /opt/airflow/webserver_config.py

# The base url of your website, as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server.
#
# Variable: AIRFLOW__WEBSERVER__BASE_URL
#
base_url = http://localhost:8080

# Default timezone to display all dates in the UI, can be UTC, system, or
# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the
# default value of core/default_timezone will be used
#
# Example: default_ui_timezone = America/New_York
#
# Variable: AIRFLOW__WEBSERVER__DEFAULT_UI_TIMEZONE
#
default_ui_timezone = UTC

# The ip specified when starting the web server
#
# Variable: AIRFLOW__WEBSERVER__WEB_SERVER_HOST
#
web_server_host = 0.0.0.0

# The port on which to run the web server
#
# Variable: AIRFLOW__WEBSERVER__WEB_SERVER_PORT
#
web_server_port = 8080

# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
#
# Variable: AIRFLOW__WEBSERVER__WEB_SERVER_SSL_CERT
#
web_server_ssl_cert = 

# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
#
# Variable: AIRFLOW__WEBSERVER__WEB_SERVER_SSL_KEY
#
web_server_ssl_key = 

# The type of backend used to store web session data, can be `database` or `securecookie`. For the
# `database` backend, sessions are stored in the database (in the `session` table) and they can be
# managed there (for example, when you reset the password of a user, all sessions for that user are
# deleted). For the `securecookie` backend, sessions are stored in encrypted cookies on the client
# side. The `securecookie` mechanism is 'lighter' than the database backend, but sessions are not
# deleted when you reset the password of a user, which means that other than waiting for expiry time,
# the only way to invalidate all sessions for a user is to change the secret_key and restart the
# webserver (which also invalidates and logs out all other users' sessions).
# 
# When you are using `database` backend, make sure to keep your database session table small
# by periodically running `airflow db clean --table session` command, especially if you have
# automated API calls that will create a new session for each call rather than reuse the sessions
# stored in browser cookies.
#
# Example: session_backend = securecookie
#
# Variable: AIRFLOW__WEBSERVER__SESSION_BACKEND
#
session_backend = database

# Number of seconds the webserver waits before killing a gunicorn master that doesn't respond
#
# Variable: AIRFLOW__WEBSERVER__WEB_SERVER_MASTER_TIMEOUT
#
web_server_master_timeout = 120

# Number of seconds the gunicorn webserver waits before timing out on a worker
#
# Variable: AIRFLOW__WEBSERVER__WEB_SERVER_WORKER_TIMEOUT
#
web_server_worker_timeout = 120

# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
#
# Variable: AIRFLOW__WEBSERVER__WORKER_REFRESH_BATCH_SIZE
#
worker_refresh_batch_size = 1

# Number of seconds to wait before refreshing a batch of workers.
#
# Variable: AIRFLOW__WEBSERVER__WORKER_REFRESH_INTERVAL
#
worker_refresh_interval = 6000

# If set to True, Airflow will track files in the plugins_folder directory. When it detects changes,
# it will reload gunicorn. If set to True, gunicorn starts without preloading, which is slower, uses
# more memory, and may cause race conditions. Avoid setting this to True in production.
#
# Variable: AIRFLOW__WEBSERVER__RELOAD_ON_PLUGIN_CHANGE
#
reload_on_plugin_change = False

# Secret key used to run your flask app. It should be as random as possible. However, when running
# more than one instance of the webserver, make sure all of them use the same ``secret_key``, otherwise
# one of them will error with "CSRF session token is missing".
# The webserver key is also used to authorize requests to Celery workers when logs are retrieved.
# The token generated using the secret key has a short expiry time though - make sure that time on
# ALL the machines that you run airflow components on is synchronized (for example using ntpd)
# otherwise you might get "forbidden" errors when the logs are accessed.
#
# Variable: AIRFLOW__WEBSERVER__SECRET_KEY
#
secret_key = krlaR3URsBeEVAxYRfsm8w==

# Number of workers to run the Gunicorn web server
#
# Variable: AIRFLOW__WEBSERVER__WORKERS
#
workers = 4

# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent. Note when using gevent you might also want to set the
# "_AIRFLOW_PATCH_GEVENT" environment variable to "1" to make sure gevent patching is done as
# early as possible.
#
# Variable: AIRFLOW__WEBSERVER__WORKER_CLASS
#
worker_class = sync

# Log files for the gunicorn webserver. '-' means log to stderr.
#
# Variable: AIRFLOW__WEBSERVER__ACCESS_LOGFILE
#
access_logfile = -

# Log files for the gunicorn webserver. '-' means log to stderr.
#
# Variable: AIRFLOW__WEBSERVER__ERROR_LOGFILE
#
error_logfile = -

# Access log format for gunicorn webserver.
# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s"
# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format
#
# Variable: AIRFLOW__WEBSERVER__ACCESS_LOGFORMAT
#
access_logformat = 

# Expose the configuration file in the web server. Set to "non-sensitive-only" to show all values
# except those that have security implications. "True" shows all values. "False" hides the
# configuration completely.
#
# Variable: AIRFLOW__WEBSERVER__EXPOSE_CONFIG
#
expose_config = False

# Expose hostname in the web server
#
# Variable: AIRFLOW__WEBSERVER__EXPOSE_HOSTNAME
#
expose_hostname = False

# Expose stacktrace in the web server
#
# Variable: AIRFLOW__WEBSERVER__EXPOSE_STACKTRACE
#
expose_stacktrace = False

# Default DAG view. Valid values are: ``grid``, ``graph``, ``duration``, ``gantt``, ``landing_times``
#
# Variable: AIRFLOW__WEBSERVER__DAG_DEFAULT_VIEW
#
dag_default_view = grid

# Default DAG orientation. Valid values are:
# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top)
#
# Variable: AIRFLOW__WEBSERVER__DAG_ORIENTATION
#
dag_orientation = LR

# Sorting order in grid view. Valid values are: ``topological``, ``hierarchical_alphabetical``
#
# Variable: AIRFLOW__WEBSERVER__GRID_VIEW_SORTING_ORDER
#
grid_view_sorting_order = topological

# The amount of time (in secs) the webserver will wait for the initial handshake
# while fetching logs from another worker machine.
#
# Variable: AIRFLOW__WEBSERVER__LOG_FETCH_TIMEOUT_SEC
#
log_fetch_timeout_sec = 5

# Time interval (in secs) to wait before next log fetching.
#
# Variable: AIRFLOW__WEBSERVER__LOG_FETCH_DELAY_SEC
#
log_fetch_delay_sec = 2

# Distance away from page bottom to enable auto tailing.
#
# Variable: AIRFLOW__WEBSERVER__LOG_AUTO_TAILING_OFFSET
#
log_auto_tailing_offset = 30

# Animation speed for auto tailing log display.
#
# Variable: AIRFLOW__WEBSERVER__LOG_ANIMATION_SPEED
#
log_animation_speed = 1000

# By default, the webserver shows paused DAGs. Flip this to hide paused
# DAGs by default
#
# Variable: AIRFLOW__WEBSERVER__HIDE_PAUSED_DAGS_BY_DEFAULT
#
hide_paused_dags_by_default = False

# Consistent page size across all listing views in the UI
#
# Variable: AIRFLOW__WEBSERVER__PAGE_SIZE
#
page_size = 100

# Define the color of navigation bar
#
# Variable: AIRFLOW__WEBSERVER__NAVBAR_COLOR
#
navbar_color = #fff

# Default dagrun to show in UI
#
# Variable: AIRFLOW__WEBSERVER__DEFAULT_DAG_RUN_DISPLAY_NUMBER
#
default_dag_run_display_number = 25

# Enable werkzeug ``ProxyFix`` middleware for reverse proxy
#
# Variable: AIRFLOW__WEBSERVER__ENABLE_PROXY_FIX
#
enable_proxy_fix = False

# Number of values to trust for ``X-Forwarded-For``.
# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/
#
# Variable: AIRFLOW__WEBSERVER__PROXY_FIX_X_FOR
#
proxy_fix_x_for = 1

# Number of values to trust for ``X-Forwarded-Proto``
#
# Variable: AIRFLOW__WEBSERVER__PROXY_FIX_X_PROTO
#
proxy_fix_x_proto = 1

# Number of values to trust for ``X-Forwarded-Host``
#
# Variable: AIRFLOW__WEBSERVER__PROXY_FIX_X_HOST
#
proxy_fix_x_host = 1

# Number of values to trust for ``X-Forwarded-Port``
#
# Variable: AIRFLOW__WEBSERVER__PROXY_FIX_X_PORT
#
proxy_fix_x_port = 1

# Number of values to trust for ``X-Forwarded-Prefix``
#
# Variable: AIRFLOW__WEBSERVER__PROXY_FIX_X_PREFIX
#
proxy_fix_x_prefix = 1

# Set secure flag on session cookie
#
# Variable: AIRFLOW__WEBSERVER__COOKIE_SECURE
#
cookie_secure = False

# Set samesite policy on session cookie
#
# Variable: AIRFLOW__WEBSERVER__COOKIE_SAMESITE
#
cookie_samesite = Lax

# Default setting for wrap toggle on DAG code and TI log views.
#
# Variable: AIRFLOW__WEBSERVER__DEFAULT_WRAP
#
default_wrap = False

# Allow the UI to be rendered in a frame
#
# Variable: AIRFLOW__WEBSERVER__X_FRAME_ENABLED
#
x_frame_enabled = True

# Send anonymous user activity to your analytics tool.
# Choose from google_analytics, segment, or metarouter.
#
# Variable: AIRFLOW__WEBSERVER__ANALYTICS_TOOL
#
# analytics_tool = 

# Unique ID of your account in the analytics tool
#
# Variable: AIRFLOW__WEBSERVER__ANALYTICS_ID
#
# analytics_id = 

# 'Recent Tasks' stats will show for old DagRuns if set
#
# Variable: AIRFLOW__WEBSERVER__SHOW_RECENT_STATS_FOR_COMPLETED_RUNS
#
show_recent_stats_for_completed_runs = True

# Update FAB permissions and sync security manager roles
# on webserver startup
#
# Variable: AIRFLOW__WEBSERVER__UPDATE_FAB_PERMS
#
update_fab_perms = True

# The UI cookie lifetime in minutes. The user will be logged out from the UI after
# ``session_lifetime_minutes`` of inactivity.
#
# Variable: AIRFLOW__WEBSERVER__SESSION_LIFETIME_MINUTES
#
session_lifetime_minutes = 43200

# Sets a custom page title for the DAGs overview page and site title for all pages
#
# Variable: AIRFLOW__WEBSERVER__INSTANCE_NAME
#
# instance_name = 

# Whether the custom page title for the DAGs overview page contains any Markup language
#
# Variable: AIRFLOW__WEBSERVER__INSTANCE_NAME_HAS_MARKUP
#
instance_name_has_markup = False

# How frequently, in seconds, the DAG data will auto-refresh in graph or grid view
# when auto-refresh is turned on
#
# Variable: AIRFLOW__WEBSERVER__AUTO_REFRESH_INTERVAL
#
auto_refresh_interval = 3

# Boolean for displaying warning for publicly viewable deployment
#
# Variable: AIRFLOW__WEBSERVER__WARN_DEPLOYMENT_EXPOSURE
#
warn_deployment_exposure = True

# Comma separated string of view events to exclude from dag audit view.
# All other events will be added minus the ones passed here.
# The audit logs in the db will not be affected by this parameter.
#
# Variable: AIRFLOW__WEBSERVER__AUDIT_VIEW_EXCLUDED_EVENTS
#
audit_view_excluded_events = gantt,landing_times,tries,duration,calendar,graph,grid,tree,tree_data

# Comma separated string of view events to include in dag audit view.
# If passed, only these events will populate the dag audit view.
# The audit logs in the db will not be affected by this parameter.
#
# Example: audit_view_included_events = dagrun_cleared,failed
#
# Variable: AIRFLOW__WEBSERVER__AUDIT_VIEW_INCLUDED_EVENTS
#
# audit_view_included_events = 

# Boolean for running SwaggerUI in the webserver.
#
# Variable: AIRFLOW__WEBSERVER__ENABLE_SWAGGER_UI
#
enable_swagger_ui = True

# Boolean for running Internal API in the webserver.
#
# Variable: AIRFLOW__WEBSERVER__RUN_INTERNAL_API
#
run_internal_api = False

# Boolean for enabling rate limiting on authentication endpoints.
#
# Variable: AIRFLOW__WEBSERVER__AUTH_RATE_LIMITED
#
auth_rate_limited = True

# Rate limit for authentication endpoints.
#
# Variable: AIRFLOW__WEBSERVER__AUTH_RATE_LIMIT
#
auth_rate_limit = 5 per 40 second
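
# Example (a stricter limit, using the same "<count> per <interval>" notation as the default above):
# auth_rate_limit = 3 per 60 second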

# The caching algorithm used by the webserver. Must be a valid hashlib function name.
#
# Example: caching_hash_method = sha256
#
# Variable: AIRFLOW__WEBSERVER__CACHING_HASH_METHOD
#
caching_hash_method = md5

# Behavior of the trigger DAG run button for DAGs without params. False to skip the form and trigger
# without displaying a form to add a dag_run.conf; True to always display the form.
# The form is always displayed if parameters are defined.
#
# Variable: AIRFLOW__WEBSERVER__SHOW_TRIGGER_FORM_IF_NO_PARAMS
#
show_trigger_form_if_no_params = False

[email]
# Configuration for the email backend and whether to
# send email alerts on retry or failure.

# Email backend to use
#
# Variable: AIRFLOW__EMAIL__EMAIL_BACKEND
#
email_backend = airflow.utils.email.send_email_smtp

# Email connection to use
#
# Variable: AIRFLOW__EMAIL__EMAIL_CONN_ID
#
email_conn_id = smtp_default

# Whether email alerts should be sent when a task is retried
#
# Variable: AIRFLOW__EMAIL__DEFAULT_EMAIL_ON_RETRY
#
default_email_on_retry = True

# Whether email alerts should be sent when a task fails
#
# Variable: AIRFLOW__EMAIL__DEFAULT_EMAIL_ON_FAILURE
#
default_email_on_failure = True

# File that will be used as the template for Email subject (which will be rendered using Jinja2).
# If not set, Airflow uses a base template.
#
# Example: subject_template = /path/to/my_subject_template_file
#
# Variable: AIRFLOW__EMAIL__SUBJECT_TEMPLATE
#
# subject_template = 

# File that will be used as the template for Email content (which will be rendered using Jinja2).
# If not set, Airflow uses a base template.
#
# Example: html_content_template = /path/to/my_html_content_template_file
#
# Variable: AIRFLOW__EMAIL__HTML_CONTENT_TEMPLATE
#
# html_content_template = 

# Email address that will be used as sender address.
# It can either be a raw email address or the complete address in the format ``Sender Name <sender@email.com>``
#
# Example: from_email = Airflow <airflow@example.com>
#
# Variable: AIRFLOW__EMAIL__FROM_EMAIL
#
# from_email = 

# The ssl context to use when using SMTP and IMAP SSL connections. By default, the context is
# "default", which sets it to ``ssl.create_default_context()``; this provides the right balance
# between compatibility and security. It does, however, require that certificates in your operating
# system are updated and that your SMTP/IMAP servers have valid certificates with corresponding public
# keys installed on your machines. You can switch it to "none" if you want to disable checking
# of the certificates, but it is not recommended as it allows MITM (man-in-the-middle) attacks
# if your infrastructure is not sufficiently secured. It should only be set temporarily while you
# are fixing your certificate configuration. This can typically be done by upgrading to a newer
# version of the operating system you run Airflow components on, by upgrading/refreshing proper
# certificates in the OS, or by updating certificates for your mail servers.
#
# Example: ssl_context = default
#
# Variable: AIRFLOW__EMAIL__SSL_CONTEXT
#
ssl_context = default

[smtp]
# If you want airflow to send emails on retries or failures, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an
# smtp server here.

#
# Variable: AIRFLOW__SMTP__SMTP_HOST
#
smtp_host = localhost

#
# Variable: AIRFLOW__SMTP__SMTP_STARTTLS
#
smtp_starttls = True

#
# Variable: AIRFLOW__SMTP__SMTP_SSL
#
smtp_ssl = False

#
# Example: smtp_user = airflow
#
# Variable: AIRFLOW__SMTP__SMTP_USER
#
# smtp_user = 

#
# Example: smtp_password = airflow
#
# Variable: AIRFLOW__SMTP__SMTP_PASSWORD
#
# smtp_password = 

#
# Variable: AIRFLOW__SMTP__SMTP_PORT
#
smtp_port = 25

#
# Variable: AIRFLOW__SMTP__SMTP_MAIL_FROM
#
smtp_mail_from = airflow@example.com

#
# Variable: AIRFLOW__SMTP__SMTP_TIMEOUT
#
smtp_timeout = 30

#
# Variable: AIRFLOW__SMTP__SMTP_RETRY_LIMIT
#
smtp_retry_limit = 5

[sentry]
# Sentry (https://docs.sentry.io) integration. Here you can supply
# additional configuration options based on the Python platform. See:
# https://docs.sentry.io/error-reporting/configuration/?platform=python.
# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``,
# ``ignore_errors``, ``before_breadcrumb``, ``transport``.

# Enable error reporting to Sentry
#
# Variable: AIRFLOW__SENTRY__SENTRY_ON
#
sentry_on = false

#
# Variable: AIRFLOW__SENTRY__SENTRY_DSN
#
sentry_dsn = 

# Dotted path to a before_send function that the sentry SDK should be configured to use.
#
# Variable: AIRFLOW__SENTRY__BEFORE_SEND
#
# before_send = 
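#
# Example (a hypothetical dotted path to your own filtering function):
# before_send = my_company.sentry_hooks.scrub_events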

[scheduler]
# Task instances listen for an external kill signal (when you clear tasks
# from the CLI or the UI); this defines the frequency at which they should
# listen (in seconds).
#
# Variable: AIRFLOW__SCHEDULER__JOB_HEARTBEAT_SEC
#
job_heartbeat_sec = 5

# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
#
# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEARTBEAT_SEC
#
scheduler_heartbeat_sec = 5

# The frequency (in seconds) at which the LocalTaskJob should send heartbeat signals to the
# scheduler to notify that it is still alive. If this value is set to 0, the heartbeat interval will default
# to the value of scheduler_zombie_task_threshold.
#
# Variable: AIRFLOW__SCHEDULER__LOCAL_TASK_JOB_HEARTBEAT_SEC
#
local_task_job_heartbeat_sec = 0

# The number of times to try to schedule each DAG file.
# -1 indicates an unlimited number.
#
# Variable: AIRFLOW__SCHEDULER__NUM_RUNS
#
num_runs = -1

# Controls how long the scheduler will sleep between loops when there was nothing to do
# in the loop; i.e., if it scheduled something, it will start the next loop
# iteration straight away.
#
# Variable: AIRFLOW__SCHEDULER__SCHEDULER_IDLE_SLEEP_TIME
#
scheduler_idle_sleep_time = 1

# Number of seconds after which a DAG file is re-parsed. Each DAG file is parsed every
# ``min_file_process_interval`` seconds, so updates to DAGs are reflected after
# this interval. Keeping this number low will increase CPU usage.
#
# Variable: AIRFLOW__SCHEDULER__MIN_FILE_PROCESS_INTERVAL
#
min_file_process_interval = 30

# How often (in seconds) to check for stale DAGs (DAGs which are no longer present in
# the expected files) which should be deactivated, as well as datasets that are no longer
# referenced and should be marked as orphaned.
#
# Variable: AIRFLOW__SCHEDULER__PARSING_CLEANUP_INTERVAL
#
parsing_cleanup_interval = 60

# How long (in seconds) to wait after we have re-parsed a DAG file before deactivating stale
# DAGs (DAGs which are no longer present in the expected files). The reason why we need
# this threshold is to account for the time between when the file is parsed and when the
# DAG is loaded. The absolute maximum that this could take is `dag_file_processor_timeout`,
# but when you have a long timeout configured, it results in a significant delay in the
# deactivation of stale dags.
#
# Variable: AIRFLOW__SCHEDULER__STALE_DAG_THRESHOLD
#
stale_dag_threshold = 50

# How often (in seconds) to scan the DAGs directory for new files. Defaults to 5 minutes.
#
# Variable: AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL
#
dag_dir_list_interval = 300

# How often (in seconds) stats should be printed to the logs. Setting this to 0 disables printing stats.
#
# Variable: AIRFLOW__SCHEDULER__PRINT_STATS_INTERVAL
#
print_stats_interval = 30

# How often (in seconds) should pool usage stats be sent to StatsD (if statsd_on is enabled)
#
# Variable: AIRFLOW__SCHEDULER__POOL_METRICS_INTERVAL
#
pool_metrics_interval = 5.0

# If the last scheduler heartbeat happened more than scheduler_health_check_threshold
# ago (in seconds), scheduler is considered unhealthy.
# This is used by the health check in the "/health" endpoint and in `airflow jobs check` CLI
# for SchedulerJob.
#
# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_THRESHOLD
#
scheduler_health_check_threshold = 30

# When you start a scheduler, airflow starts a tiny web server
# subprocess to serve a health check if this is set to True
#
# Variable: AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK
#
enable_health_check = False

# When you start a scheduler, airflow starts a tiny web server
# subprocess to serve a health check on this port
#
# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_SERVER_PORT
#
scheduler_health_check_server_port = 8974
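
# When the health check server is enabled, the endpoint at
# ``http://<hostname>:8974/health`` can be used as a liveness probe target; it is
# expected to respond with HTTP 200 while the scheduler heartbeat is recent
# (judged against ``scheduler_health_check_threshold``).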

# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs
#
# Variable: AIRFLOW__SCHEDULER__ORPHANED_TASKS_CHECK_INTERVAL
#
orphaned_tasks_check_interval = 300.0

#
# Variable: AIRFLOW__SCHEDULER__CHILD_PROCESS_LOG_DIRECTORY
#
child_process_log_directory = /opt/airflow/logs/scheduler

# Local task jobs periodically heartbeat to the DB. If the job has
# not sent a heartbeat in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
#
# Variable: AIRFLOW__SCHEDULER__SCHEDULER_ZOMBIE_TASK_THRESHOLD
#
scheduler_zombie_task_threshold = 300

# How often (in seconds) should the scheduler check for zombie tasks.
#
# Variable: AIRFLOW__SCHEDULER__ZOMBIE_DETECTION_INTERVAL
#
zombie_detection_interval = 10.0

# Turn off scheduler catchup by setting this to ``False``.
# Command line backfills still work, but the scheduler
# will not do catchup for DAG runs if this is ``False``.
# This can also be set on a per-DAG basis in the
# DAG definition (``catchup``); see the sketch below.
#
# Variable: AIRFLOW__SCHEDULER__CATCHUP_BY_DEFAULT
#
catchup_by_default = True
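
# For illustration, a hedged per-DAG override (``my_dag`` and the schedule are
# illustrative; the ``schedule`` argument assumes Airflow 2.4+):
#
#     import pendulum
#     from airflow import DAG
#
#     with DAG(
#         dag_id="my_dag",
#         start_date=pendulum.datetime(2024, 1, 1, tz="UTC"),
#         schedule="@daily",
#         catchup=False,  # overrides catchup_by_default for this DAG only
#     ):
#         ...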

# Setting this to True will make the first task instance of a task
# ignore the depends_on_past setting. A task instance is considered
# the first task instance of a task when there is no task instance
# in the DB with an earlier execution_date, i.e. no manual marking of
# success will be needed for a newly added task to be scheduled.
#
# Variable: AIRFLOW__SCHEDULER__IGNORE_FIRST_DEPENDS_ON_PAST_BY_DEFAULT
#
ignore_first_depends_on_past_by_default = True

# This changes the batch size of queries in the scheduling main loop.
# This should not be greater than ``core.parallelism``.
# If this is too high, SQL query performance may be impacted by the
# complexity of the query predicate, and/or excessive locking.
# Additionally, you may hit the maximum allowable query length for your db.
# Set this to 0 to use the value of ``core.parallelism``
#
# Variable: AIRFLOW__SCHEDULER__MAX_TIS_PER_QUERY
#
max_tis_per_query = 16

# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries.
# If this is set to False then you should not run more than a single
# scheduler at once
#
# Variable: AIRFLOW__SCHEDULER__USE_ROW_LEVEL_LOCKING
#
use_row_level_locking = True

# Max number of DAGs to create DagRuns for per scheduler loop.
#
# Variable: AIRFLOW__SCHEDULER__MAX_DAGRUNS_TO_CREATE_PER_LOOP
#
max_dagruns_to_create_per_loop = 10

# How many DagRuns should a scheduler examine (and lock) when scheduling
# and queuing tasks.
#
# Variable: AIRFLOW__SCHEDULER__MAX_DAGRUNS_PER_LOOP_TO_SCHEDULE
#
max_dagruns_per_loop_to_schedule = 20

# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the
# same DAG. Leaving this on means tasks in the same DAG execute more quickly, but it might starve out
# other DAGs in some circumstances.
#
# Variable: AIRFLOW__SCHEDULER__SCHEDULE_AFTER_TASK_EXECUTION
#
schedule_after_task_execution = True

# The scheduler reads dag files to extract the airflow modules that are going to be used,
# and imports them ahead of time to avoid having to re-do it for each parsing process.
# This flag can be set to False to disable this behavior in case an airflow module needs to be freshly
# imported each time (at the cost of increased DAG parsing time).
#
# Variable: AIRFLOW__SCHEDULER__PARSING_PRE_IMPORT_MODULES
#
parsing_pre_import_modules = True

# The scheduler can run multiple processes in parallel to parse dags.
# This defines how many processes will run.
#
# Variable: AIRFLOW__SCHEDULER__PARSING_PROCESSES
#
parsing_processes = 2

# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``.
# The scheduler will list and sort the dag files to decide the parsing order.
# 
# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the
#   recently modified DAGs first.
# * ``random_seeded_by_host``: Sort randomly across multiple Schedulers but with same order on the
#   same host. This is useful when running with Scheduler in HA mode where each scheduler can
#   parse different DAG files.
# * ``alphabetical``: Sort by filename
#
# Variable: AIRFLOW__SCHEDULER__FILE_PARSING_SORT_MODE
#
file_parsing_sort_mode = modified_time

# Whether the dag processor is running as a standalone process or it is a subprocess of a scheduler
# job.
#
# Variable: AIRFLOW__SCHEDULER__STANDALONE_DAG_PROCESSOR
#
standalone_dag_processor = False

# Only applicable if `[scheduler]standalone_dag_processor` is true and callbacks are stored
# in the database. The maximum number of callbacks that are fetched during a single loop.
#
# Variable: AIRFLOW__SCHEDULER__MAX_CALLBACKS_PER_LOOP
#
max_callbacks_per_loop = 20

# Only applicable if `[scheduler]standalone_dag_processor` is true.
# Time in seconds after which DAGs that were not updated by the DAG processor are deactivated.
#
# Variable: AIRFLOW__SCHEDULER__DAG_STALE_NOT_SEEN_DURATION
#
dag_stale_not_seen_duration = 600

# Turn off scheduler use of cron intervals by setting this to False.
# DAGs submitted manually in the web UI or with trigger_dag will still run.
#
# Variable: AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE
#
use_job_schedule = True

# Allow externally triggered DagRuns for execution dates in the future.
# Only has effect if schedule_interval is set to None in the DAG.
#
# Variable: AIRFLOW__SCHEDULER__ALLOW_TRIGGER_IN_FUTURE
#
allow_trigger_in_future = False

# How often to check for expired trigger requests that have not run yet.
#
# Variable: AIRFLOW__SCHEDULER__TRIGGER_TIMEOUT_CHECK_INTERVAL
#
trigger_timeout_check_interval = 15

# Amount of time a task can be in the queued state before being retried or set to failed.
#
# Variable: AIRFLOW__SCHEDULER__TASK_QUEUED_TIMEOUT
#
task_queued_timeout = 600.0

# How often to check for tasks that have been in the queued state for
# longer than `[scheduler] task_queued_timeout`.
#
# Variable: AIRFLOW__SCHEDULER__TASK_QUEUED_TIMEOUT_CHECK_INTERVAL
#
task_queued_timeout_check_interval = 120.0

# The run_id pattern used to verify the validity of user input to the run_id parameter when
# triggering a DAG. This pattern does not affect the run_id that the scheduler generates for
# scheduled DAG runs or for DAG runs triggered without specifying a run_id parameter.
#
# Variable: AIRFLOW__SCHEDULER__ALLOWED_RUN_ID_PATTERN
#
allowed_run_id_pattern = ^[A-Za-z0-9_.~:+-]+$
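
# For example, the default pattern accepts a run_id such as
# ``manual__2024-01-01T00:00:00+00:00`` (letters, digits, and ``_.~:+-`` only), while
# values containing spaces or slashes are rejected.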

[triggerer]
# How many triggers a single Triggerer will run at once, by default.
#
# Variable: AIRFLOW__TRIGGERER__DEFAULT_CAPACITY
#
default_capacity = 1000

# How often to heartbeat the Triggerer job to ensure it hasn't been killed.
#
# Variable: AIRFLOW__TRIGGERER__JOB_HEARTBEAT_SEC
#
job_heartbeat_sec = 5

# If the last triggerer heartbeat happened more than triggerer_health_check_threshold
# ago (in seconds), triggerer is considered unhealthy.
# This is used by the health check in the "/health" endpoint and in `airflow jobs check` CLI
# for TriggererJob.
#
# Variable: AIRFLOW__TRIGGERER__TRIGGERER_HEALTH_CHECK_THRESHOLD
#
triggerer_health_check_threshold = 30

[kerberos]
#
# Variable: AIRFLOW__KERBEROS__CCACHE
#
ccache = /tmp/airflow_krb5_ccache

# The Kerberos principal to use; it gets augmented with the FQDN.
#
# Variable: AIRFLOW__KERBEROS__PRINCIPAL
#
principal = airflow

#
# Variable: AIRFLOW__KERBEROS__REINIT_FREQUENCY
#
reinit_frequency = 3600

#
# Variable: AIRFLOW__KERBEROS__KINIT_PATH
#
kinit_path = kinit

#
# Variable: AIRFLOW__KERBEROS__KEYTAB
#
keytab = airflow.keytab

# Allows disabling ticket forwardability.
#
# Variable: AIRFLOW__KERBEROS__FORWARDABLE
#
forwardable = True

# Allows removing the source IP from the token, which is useful when using the token behind a
# NATted Docker host.
#
# Variable: AIRFLOW__KERBEROS__INCLUDE_IP
#
include_ip = True

[sensors]
# Sensor default timeout, 7 days by default (7 * 24 * 60 * 60).
#
# Variable: AIRFLOW__SENSORS__DEFAULT_TIMEOUT
#
default_timeout = 604800
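
# Individual sensors can override this via the ``timeout`` argument; a hedged sketch
# (the file path is illustrative):
#
#     from airflow.sensors.filesystem import FileSensor
#
#     wait_for_file = FileSensor(
#         task_id="wait_for_file",
#         filepath="/data/incoming/input.csv",
#         poke_interval=60,
#         timeout=60 * 60,  # give up after 1 hour instead of the 7-day default
#     )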

[aws]
# This section contains settings for Amazon Web Services (AWS) integration.

# Full import path to a custom session factory class for creating boto3 sessions; left
# unset to use the provider default.
#
# session_factory = 

# Serializer used by the CloudWatch task handler to JSON-encode log records; the default
# ``json_serialize_legacy`` keeps the historical behaviour of rendering values that are
# not JSON-serializable as ``null``.
cloudwatch_task_handler_json_serializer = airflow.providers.amazon.aws.log.cloudwatch_task_handler.json_serialize_legacy

[aws_ecs_executor]
# This section only applies if you are using the AwsEcsExecutor in
# Airflow's ``[core]`` configuration.
# For more information on any of these execution parameters, see the link below:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs/client/run_task.html
# For boto3 credential management, see
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html

conn_id = aws_default
# region_name = 
assign_public_ip = False
# cluster = 
# container_name = 
launch_type = FARGATE
platform_version = LATEST
# security_groups = 
# subnets = 
# task_definition = 
max_run_task_attempts = 3
# run_task_kwargs = 

[celery_kubernetes_executor]
# This section only applies if you are using the ``CeleryKubernetesExecutor`` in
# the ``[core]`` section above.

# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``.
# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``),
# the task is executed via ``KubernetesExecutor``,
# otherwise via ``CeleryExecutor``
#
# Variable: AIRFLOW__CELERY_KUBERNETES_EXECUTOR__KUBERNETES_QUEUE
#
kubernetes_queue = kubernetes

[celery]
# This section only applies if you are using the CeleryExecutor in
# the ``[core]`` section above.

# The app name that will be used by celery
#
# Variable: AIRFLOW__CELERY__CELERY_APP_NAME
#
celery_app_name = airflow.providers.celery.executors.celery_executor

# The concurrency that will be used when starting workers with the
# ``airflow celery worker`` command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
#
# Variable: AIRFLOW__CELERY__WORKER_CONCURRENCY
#
worker_concurrency = 16

# The maximum and minimum concurrency that will be used when starting workers with the
# ``airflow celery worker`` command (always keep minimum processes, but grow
# to maximum if necessary). Note the value should be max_concurrency,min_concurrency.
# Pick these numbers based on the resources on your worker box and the nature of the task.
# If the autoscale option is available, worker_concurrency will be ignored.
# http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale
#
# Example: worker_autoscale = 16,12
#
# Variable: AIRFLOW__CELERY__WORKER_AUTOSCALE
#
# worker_autoscale = 

# Used to increase the number of tasks that a worker prefetches, which can improve performance.
# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks
# that are prefetched by a worker. A value greater than 1 can result in tasks being
# unnecessarily blocked: if one worker prefetches tasks that sit behind long-running tasks,
# another worker with idle processes cannot pick up those already-claimed tasks.
# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits
#
# Variable: AIRFLOW__CELERY__WORKER_PREFETCH_MULTIPLIER
#
worker_prefetch_multiplier = 1

# Specify if remote control of the workers is enabled.
# In some cases when the broker does not support remote control, Celery creates lots of
# ``.*reply-celery-pidbox`` queues. You can prevent this by setting this to false.
# However, with this disabled Flower won't work.
# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/index.html#broker-overview
#
# Variable: AIRFLOW__CELERY__WORKER_ENABLE_REMOTE_CONTROL
#
worker_enable_remote_control = true

# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more information.
#
# Variable: AIRFLOW__CELERY__BROKER_URL
#
broker_url = redis://redis:6379/0

# The Celery result_backend. When a job finishes, it needs to update the
# metadata of the job. Therefore it will post a message on a message bus,
# or insert it into a database (depending on the backend).
# This status is used by the scheduler to update the state of the task.
# The use of a database is highly recommended.
# When not specified, sql_alchemy_conn with a db+ scheme prefix will be used.
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
#
# Example: result_backend = db+postgresql://postgres:airflow@postgres/airflow
#
# Variable: AIRFLOW__CELERY__RESULT_BACKEND
#
# result_backend = 

# Optional configuration dictionary to pass to the Celery result backend SQLAlchemy engine.
#
# Example: result_backend_sqlalchemy_engine_options = {"pool_recycle": 1800}
#
# Variable: AIRFLOW__CELERY__RESULT_BACKEND_SQLALCHEMY_ENGINE_OPTIONS
#
result_backend_sqlalchemy_engine_options = 

# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it: ``airflow celery flower``. This defines the IP that Celery Flower runs on.
#
# Variable: AIRFLOW__CELERY__FLOWER_HOST
#
flower_host = 0.0.0.0

# The root URL for Flower
#
# Example: flower_url_prefix = /flower
#
# Variable: AIRFLOW__CELERY__FLOWER_URL_PREFIX
#
flower_url_prefix = 

# This defines the port that Celery Flower runs on
#
# Variable: AIRFLOW__CELERY__FLOWER_PORT
#
flower_port = 5555

# Securing Flower with Basic Authentication
# Accepts user:password pairs separated by a comma
#
# Example: flower_basic_auth = user1:password1,user2:password2
#
# Variable: AIRFLOW__CELERY__FLOWER_BASIC_AUTH
#
flower_basic_auth = 

# How many processes CeleryExecutor uses to sync task state.
# 0 means to use max(1, number of cores - 1) processes.
#
# Variable: AIRFLOW__CELERY__SYNC_PARALLELISM
#
sync_parallelism = 0

# Import path for celery configuration options
#
# Variable: AIRFLOW__CELERY__CELERY_CONFIG_OPTIONS
#
celery_config_options = airflow.providers.celery.executors.default_celery.DEFAULT_CELERY_CONFIG
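
# For illustration, a hedged sketch of a custom configuration module (the path
# ``my_company.celery_config`` is hypothetical) that extends the provider defaults:
#
#     from airflow.providers.celery.executors.default_celery import DEFAULT_CELERY_CONFIG
#
#     CELERY_CONFIG = {
#         **DEFAULT_CELERY_CONFIG,
#         "worker_max_tasks_per_child": 100,  # recycle worker processes to cap memory growth
#     }
#
# which would then be referenced as:
# celery_config_options = my_company.celery_config.CELERY_CONFIG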

#
# Variable: AIRFLOW__CELERY__SSL_ACTIVE
#
ssl_active = False

# Path to the client key.
#
# Variable: AIRFLOW__CELERY__SSL_KEY
#
ssl_key = 

# Path to the client certificate.
#
# Variable: AIRFLOW__CELERY__SSL_CERT
#
ssl_cert = 

# Path to the CA certificate.
#
# Variable: AIRFLOW__CELERY__SSL_CACERT
#
ssl_cacert = 

# Celery Pool implementation.
# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``.
# See:
# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency
# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html
#
# Variable: AIRFLOW__CELERY__POOL
#
pool = prefork

# The number of seconds to wait before timing out ``send_task_to_executor`` or
# ``fetch_celery_task_state`` operations.
#
# Variable: AIRFLOW__CELERY__OPERATION_TIMEOUT
#
operation_timeout = 1.0

# Celery task will report its status as 'started' when the task is executed by a worker.
# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted
# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob.
#
# Variable: AIRFLOW__CELERY__TASK_TRACK_STARTED
#
task_track_started = True

# The maximum number of retries for publishing task messages to the broker when failing
# due to an ``AirflowTaskTimeout`` error, before giving up and marking the task as failed.
#
# Variable: AIRFLOW__CELERY__TASK_PUBLISH_MAX_RETRIES
#
task_publish_max_retries = 3

# Worker initialisation check to validate the metadata database connection.
#
# Variable: AIRFLOW__CELERY__WORKER_PRECHECK
#
worker_precheck = False

[celery_broker_transport_options]
# This section is for specifying options which can be passed to the
# underlying celery broker transport. See:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options

# The visibility timeout defines the number of seconds to wait for the worker
# to acknowledge the task before the message is redelivered to another worker.
# Make sure to increase the visibility timeout to match the time of the longest
# ETA you're planning to use.
# visibility_timeout is only supported for Redis and SQS celery brokers.
# See:
# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#visibility-timeout
#
# Example: visibility_timeout = 21600
#
# Variable: AIRFLOW__CELERY_BROKER_TRANSPORT_OPTIONS__VISIBILITY_TIMEOUT
#
# visibility_timeout = 

# The sentinel_kwargs parameter allows passing additional options to the Sentinel client.
# In a typical scenario where Redis Sentinel is used as the broker and Redis servers are
# password-protected, the password needs to be passed through this parameter. Although its
# type is string, the value must conform to the dictionary format.
# See:
# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#configuration
#
# Example: sentinel_kwargs = {"password": "password_for_redis_server"}
#
# Variable: AIRFLOW__CELERY_BROKER_TRANSPORT_OPTIONS__SENTINEL_KWARGS
#
# sentinel_kwargs = 

[local_kubernetes_executor]
# This section only applies if you are using the ``LocalKubernetesExecutor`` in
# the ``[core]`` section above.

# Define when to send a task to ``KubernetesExecutor`` when using ``LocalKubernetesExecutor``.
# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``),
# the task is executed via ``KubernetesExecutor``,
# otherwise via ``LocalExecutor``
#
# Variable: AIRFLOW__LOCAL_KUBERNETES_EXECUTOR__KUBERNETES_QUEUE
#
kubernetes_queue = kubernetes

[kubernetes_executor]
# Kwargs to override the default urllib3 Retry used in the kubernetes API client
#
# Example: api_client_retry_configuration = { "total": 3, "backoff_factor": 0.5 }
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__API_CLIENT_RETRY_CONFIGURATION
#
api_client_retry_configuration = 

# Flag to control the information added to kubernetes executor logs for better traceability
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__LOGS_TASK_METADATA
#
logs_task_metadata = False

# Path to the YAML pod file that forms the basis for KubernetesExecutor workers.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__POD_TEMPLATE_FILE
#
pod_template_file = 
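
# For illustration, a hedged sketch of a minimal pod template (the image tag is
# illustrative; the KubernetesExecutor expects the task container to be named ``base``):
#
#     apiVersion: v1
#     kind: Pod
#     metadata:
#       name: placeholder-name
#     spec:
#       containers:
#         - name: base
#           image: apache/airflow:2.8.1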

# The repository of the Kubernetes Image for the Worker to Run
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_CONTAINER_REPOSITORY
#
worker_container_repository = 

# The tag of the Kubernetes Image for the Worker to Run
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_CONTAINER_TAG
#
worker_container_tag = 

# The Kubernetes namespace where airflow workers should be created. Defaults to ``default``
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__NAMESPACE
#
namespace = default

# If True, all worker pods will be deleted upon termination
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_WORKER_PODS
#
delete_worker_pods = True

# If False (and delete_worker_pods is True),
# failed worker pods will not be deleted so users can investigate them.
# This only prevents removal of worker pods where the worker itself failed,
# not when the task it ran failed.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_WORKER_PODS_ON_FAILURE
#
delete_worker_pods_on_failure = False

# Number of Kubernetes Worker Pod creation calls per scheduler loop.
# Note that the current default of "1" will only launch a single pod
# per heartbeat. It is HIGHLY recommended that users increase this
# number to match the tolerance of their kubernetes cluster for
# better performance.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_PODS_CREATION_BATCH_SIZE
#
worker_pods_creation_batch_size = 1

# Allows users to launch pods in multiple namespaces.
# This requires creating a cluster role for the scheduler,
# or using the multi_namespace_mode_namespace_list configuration.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__MULTI_NAMESPACE_MODE
#
multi_namespace_mode = False

# If multi_namespace_mode is True while the scheduler does not have a cluster role,
# give the list of namespaces where the scheduler will schedule jobs.
# The scheduler needs to have the necessary permissions in these namespaces.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__MULTI_NAMESPACE_MODE_NAMESPACE_LIST
#
multi_namespace_mode_namespace_list = 

# Use the service account kubernetes gives to pods to connect to the kubernetes cluster.
# It's intended for clients that expect to be running inside a pod running on kubernetes.
# It will raise an exception if called from a process not running in a kubernetes environment.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__IN_CLUSTER
#
in_cluster = True

# When running with in_cluster=False, change the default cluster_context or config_file
# options passed to the Kubernetes client. Leave these blank to use the default behaviour,
# as ``kubectl`` does.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__CLUSTER_CONTEXT
#
# cluster_context = 

# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__CONFIG_FILE
#
# config_file = 

# Keyword parameters to pass when calling kubernetes client core_v1_api methods
# from the Kubernetes Executor, provided as a single-line JSON dictionary string.
# The list of supported params is similar for all core_v1_apis, hence a single config
# variable for all APIs. See:
# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__KUBE_CLIENT_REQUEST_ARGS
#
kube_client_request_args = 

# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client
# ``core_v1_api`` method when using the Kubernetes Executor.
# This should be an object and can contain any of the options listed in the ``v1DeleteOptions``
# class defined here:
# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19
#
# Example: delete_option_kwargs = {"grace_period_seconds": 10}
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_OPTION_KWARGS
#
delete_option_kwargs = 

# Enables the TCP keepalive mechanism. This prevents Kubernetes API requests from hanging
# indefinitely when an idle connection is timed out by services like cloud load balancers
# or firewalls.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__ENABLE_TCP_KEEPALIVE
#
enable_tcp_keepalive = True

# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has
# been idle for `tcp_keep_idle` seconds.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_IDLE
#
tcp_keep_idle = 120

# When the `enable_tcp_keepalive` option is enabled, if the Kubernetes API does not respond
# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_INTVL
#
tcp_keep_intvl = 30

# When the `enable_tcp_keepalive` option is enabled, if the Kubernetes API does not respond
# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt` number of times before
# a connection is considered to be broken.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_CNT
#
tcp_keep_cnt = 6

# Set this to false to skip verifying the SSL certificate of the Kubernetes python client.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__VERIFY_SSL
#
verify_ssl = True

# How often in seconds to check for task instances stuck in "queued" status without a pod
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_PODS_QUEUED_CHECK_INTERVAL
#
worker_pods_queued_check_interval = 60

# Path to a CA certificate to be used by the Kubernetes client to verify the server's SSL certificate.
#
# Variable: AIRFLOW__KUBERNETES_EXECUTOR__SSL_CA_CERT
#
ssl_ca_cert = 

[dask]
# This section only applies if you are using DaskExecutor.

# The IP address and port of the Dask cluster's scheduler.
#
# Variable: AIRFLOW__DASK__CLUSTER_ADDRESS
#
cluster_address = 127.0.0.1:8786

# Path to a CA certificate file encoded in PEM format to access a secured Dask scheduler.
#
# Variable: AIRFLOW__DASK__TLS_CA
#
tls_ca = 

# Path to a certificate file for the client, encoded in PEM format.
#
# Variable: AIRFLOW__DASK__TLS_CERT
#
tls_cert = 

# Path to a key file for the client, encoded in PEM format.
#
# Variable: AIRFLOW__DASK__TLS_KEY
#
tls_key = 

[elasticsearch]
# Elasticsearch host
#
# Variable: AIRFLOW__ELASTICSEARCH__HOST
#
host = 

# Format of the log_id, which is used to query for a given task's logs.
#
# Variable: AIRFLOW__ELASTICSEARCH__LOG_ID_TEMPLATE
#
log_id_template = {dag_id}-{task_id}-{run_id}-{map_index}-{try_number}
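
# For example, with the default template, the first try of an unmapped task renders as
# something like ``my_dag-my_task-scheduled__2024-01-01T00:00:00+00:00--1-1``
# (map_index is -1 for unmapped tasks); the dag_id, task_id, and run_id are illustrative.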

# Used to mark the end of a log stream for a task
#
# Variable: AIRFLOW__ELASTICSEARCH__END_OF_LOG_MARK
#
end_of_log_mark = end_of_log

# Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id.
# Code will construct the log_id using the log_id template from the argument above.
# NOTE: the scheme will default to https if one is not provided.
#
# Example: frontend = http://localhost:5601/app/kibana#/discover?_a=(columns:!(message),query:(language:kuery,query:'log_id: "{log_id}"'),sort:!(log.offset,asc))
#
# Variable: AIRFLOW__ELASTICSEARCH__FRONTEND
#
frontend = 

# Write the task logs to the stdout of the worker, rather than the default files
#
# Variable: AIRFLOW__ELASTICSEARCH__WRITE_STDOUT
#
write_stdout = False

# Instead of the default log formatter, write the log lines as JSON
#
# Variable: AIRFLOW__ELASTICSEARCH__JSON_FORMAT
#
json_format = False

# Log fields to also attach to the json output, if enabled
#
# Variable: AIRFLOW__ELASTICSEARCH__JSON_FIELDS
#
json_fields = asctime, filename, lineno, levelname, message

# The field where host name is stored (normally either `host` or `host.name`)
#
# Variable: AIRFLOW__ELASTICSEARCH__HOST_FIELD
#
host_field = host

# The field where offset is stored (normally either `offset` or `log.offset`)
#
# Variable: AIRFLOW__ELASTICSEARCH__OFFSET_FIELD
#
offset_field = offset

# Comma separated list of index patterns to use when searching for logs (default: `_all`).
#
# Example: index_patterns = something-*
#
# Variable: AIRFLOW__ELASTICSEARCH__INDEX_PATTERNS
#
index_patterns = _all

[elasticsearch_configs]
#
# Variable: AIRFLOW__ELASTICSEARCH_CONFIGS__HTTP_COMPRESS
#
http_compress = False

#
# Variable: AIRFLOW__ELASTICSEARCH_CONFIGS__VERIFY_CERTS
#
verify_certs = True

[imap]
# Options for the IMAP provider.

# SSL context to use for IMAP SSL connections; see the `[email] ssl_context` option above.
#
# Variable: AIRFLOW__IMAP__SSL_CONTEXT
#
# ssl_context = 

[azure_remote_logging]
# Configuration that needs to be set for enabling remote logging in Azure Blob Storage.

# The name of the Azure Blob Storage container used for remote task logs.
#
# Variable: AIRFLOW__AZURE_REMOTE_LOGGING__REMOTE_WASB_LOG_CONTAINER
#
remote_wasb_log_container = airflow-logs

[openlineage]
# This section applies settings for the OpenLineage integration.
# For backwards compatibility with `openlineage-python`, one can still use an
# `openlineage.yml` file or `OPENLINEAGE_` environment variables. However, the
# configuration below takes precedence over those.
# More in the documentation - https://openlineage.io/docs/client/python#configuration.

# Set this to true if you don't want OpenLineage to emit events.
#
# Variable: AIRFLOW__OPENLINEAGE__DISABLED
#
disabled = False

# Semicolon separated string of Airflow Operator names to disable
#
# Example: disabled_for_operators = airflow.operators.bash.BashOperator;airflow.operators.python.PythonOperator
#
# Variable: AIRFLOW__OPENLINEAGE__DISABLED_FOR_OPERATORS
#
disabled_for_operators = 

# OpenLineage namespace
#
# Example: namespace = food_delivery
#
# Variable: AIRFLOW__OPENLINEAGE__NAMESPACE
#
namespace = my-namespace

# Semicolon separated paths to custom OpenLineage extractors.
#
# Example: extractors = full.path.to.ExtractorClass;full.path.to.AnotherExtractorClass
#
# Variable: AIRFLOW__OPENLINEAGE__EXTRACTORS
#
extractors = 

# Path to a YAML config file. This provides backwards compatibility for passing config as
# an `openlineage.yml` file.
#
# Variable: AIRFLOW__OPENLINEAGE__CONFIG_PATH
#
config_path = 

# OpenLineage Client transport configuration. It should contain the type
# and additional options for each type.
# 
# Currently supported types are:
# 
#   * HTTP
#   * Kafka
#   * Console
#
# Example: transport = {"type": "http", "url": "http://localhost:5000"}
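# Example (a hedged Kafka sketch; key names follow the OpenLineage client docs):
# transport = {"type": "kafka", "topic": "openlineage_events", "config": {"bootstrap.servers": "localhost:9092"}}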
#
# Variable: AIRFLOW__OPENLINEAGE__TRANSPORT
#
transport = {"type": "http", "url": "http://10.0.19.7:5000"}

# If set to true, OpenLineage events do not contain the source code of particular
# operators, like PythonOperator.
#
# Variable: AIRFLOW__OPENLINEAGE__DISABLE_SOURCE_CODE
#
# disable_source_code =