## CLO GENERATED CONFIGURATION ###
# This file is a copy of the fluentd configuration entrypoint
# which should normally be supplied in a configmap.
log_level "#{ENV['LOG_LEVEL'] || 'warn'}"
# Prometheus Monitoring
@type prometheus
bind "[::]"
cert_path /etc/collector/metrics/tls.crt
private_key_path /etc/collector/metrics/tls.key
@type prometheus_monitor
hostname ${hostname}
# excluding prometheus_tail_monitor
# since it leaks namespace/pod info
# via file paths
# tail_monitor plugin which publishes log_collected_bytes_total
@type collected_tail_monitor
hostname ${hostname}
# This is considered experimental by the repo
@type prometheus_output_monitor
hostname ${hostname}
# Logs from linux journal
@type systemd
@id systemd-input
@label @INGRESS
path '/var/log/journal'
@type local
persistent true
# NOTE: if this does not end in .json, fluentd will think it
# is the name of a directory - see fluentd storage_local.rb
path '/var/lib/fluentd/pos/journal_pos.json'
matches "#{ENV['JOURNAL_FILTERS_JSON'] || '[]'}"
tag journal
read_from_head "#{if (val = ENV.fetch('JOURNAL_READ_FROM_HEAD','')) && (val.length > 0); val; else 'false'; end}"
# Logs from containers (including openshift containers)
@type tail
@id container-input
path "/var/log/pods/*/*/*.log"
exclude_path ["/var/log/pods/openshift-logging_collector-*/*/*.log", "/var/log/pods/openshift-logging_elasticsearch-*/*/*.log", "/var/log/pods/openshift-logging_kibana-*/*/*.log", "/var/log/pods/*/*/*.gz", "/var/log/pods/*/*/*.tmp"]
pos_file "/var/lib/fluentd/pos/es-containers.log.pos"
follow_inodes true
refresh_interval 5
rotate_wait 5
tag kubernetes.*
read_from_head "true"
skip_refresh_on_startup true
@label @CONCAT
@type regexp
expression /^(?<@timestamp>[^\s]+) (?<stream>stdout|stderr) (?<logtag>[F|P]) (?<message>.*)$/
time_key '@timestamp'
keep_time_key true
# linux audit logs
@type tail
@id audit-input
@label @INGRESS
path "/var/log/audit/audit.log"
pos_file "/var/lib/fluentd/pos/audit.log.pos"
follow_inodes true
tag linux-audit.log
@type viaq_host_audit
# k8s audit logs
@type tail
@id k8s-audit-input
@label @INGRESS
path "/var/log/kube-apiserver/audit.log"
pos_file "/var/lib/fluentd/pos/kube-apiserver.audit.log.pos"
follow_inodes true
tag k8s-audit.log
@type json
time_key requestReceivedTimestamp
# In case folks want to parse based on the requestReceivedTimestamp key
keep_time_key true
time_format %Y-%m-%dT%H:%M:%S.%N%z
# Openshift audit logs
@type tail
@id openshift-audit-input
@label @INGRESS
path /var/log/oauth-apiserver/audit.log,/var/log/openshift-apiserver/audit.log
pos_file /var/lib/fluentd/pos/oauth-apiserver.audit.log
follow_inodes true
tag openshift-audit.log
@type json
time_key requestReceivedTimestamp
# In case folks want to parse based on the requestReceivedTimestamp key
keep_time_key true
time_format %Y-%m-%dT%H:%M:%S.%N%z
# Openshift Virtual Network (OVN) audit logs
@type tail
@id ovn-audit-input
@label @INGRESS
path "/var/log/ovn/acl-audit-log.log"
pos_file "/var/lib/fluentd/pos/acl-audit-log.pos"
follow_inodes true
tag ovn-audit.log
refresh_interval 5
rotate_wait 5
read_from_head true
@type none
# Concat log lines of container logs, and send to INGRESS pipeline
@type concat
key message
partial_key logtag
partial_value P
separator ''
@type relabel
@label @INGRESS
# Ingress pipeline
# Filter out PRIORITY from journal logs
@type grep
key PRIORITY
pattern ^7$
# Process OVN logs
@type record_modifier
@timestamp ${DateTime.parse(record['message'].split('|')[0]).rfc3339(6)}
level ${record['message'].split('|')[3].downcase}
# Process Kube and OpenShift Audit logs
@type record_modifier
@timestamp ${record['requestReceivedTimestamp']}
# Retag Journal logs to specific tags
@type rewrite_tag_filter
# skip to @INGRESS label section
@label @INGRESS
# see if this is a kibana container for special log handling
# looks like this:
# k8s_kibana.a67f366_logging-kibana-1-d90e3_logging_26c51a61-2835-11e6-ad29-fa163e4944d5_f0db49a2
# we filter these logs through the kibana_transform.conf filter
key CONTAINER_NAME
pattern ^k8s_kibana\.
tag kubernetes.journal.container.kibana
key CONTAINER_NAME
pattern ^k8s_[^_]+_logging-eventrouter-[^_]+_
tag kubernetes.journal.container._default_.kubernetes-event
# mark logs from default namespace for processing as k8s logs but stored as system logs
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_default_
tag kubernetes.journal.container._default_
# mark logs from kube-* namespaces for processing as k8s logs but stored as system logs
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_kube-(.+)_
tag kubernetes.journal.container._kube-$1_
# mark logs from openshift-* namespaces for processing as k8s logs but stored as system logs
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_openshift-(.+)_
tag kubernetes.journal.container._openshift-$1_
# mark logs from openshift namespace for processing as k8s logs but stored as system logs
key CONTAINER_NAME
pattern ^k8s_[^_]+_[^_]+_openshift_
tag kubernetes.journal.container._openshift_
# mark fluentd container logs
key CONTAINER_NAME
pattern ^k8s_.*fluentd
tag kubernetes.journal.container.fluentd
# this is a kubernetes container
key CONTAINER_NAME
pattern ^k8s_
tag kubernetes.journal.container
# not kubernetes - assume a system log or system container log
key _TRANSPORT
pattern .+
tag journal.system
# Invoke kubernetes apiserver to get kubernetes metadata
@id kubernetes-metadata
@type kubernetes_metadata
kubernetes_url 'https://kubernetes.default.svc'
annotation_match ["^containerType\.logging\.openshift\.io\/.*$"]
allow_orphans false
cache_size '1000'
ssl_partial_chain 'true'
# Parse Json fields for container, journal and eventrouter logs
@type parse_json_field
merge_json_log true
preserve_json_log true
json_fields 'message'
# Fix level field in audit logs
@type record_modifier
k8s_audit_level ${record['level']}
@type record_modifier
openshift_audit_level ${record['level']}
# Viaq Data Model
@type viaq_data_model
enable_flatten_labels true
enable_prune_empty_fields false
default_keep_fields CEE,time,@timestamp,aushape,ci_job,collectd,docker,fedora-ci,file,foreman,geoip,hostname,ipaddr4,ipaddr6,kubernetes,level,message,namespace_name,namespace_uuid,offset,openstack,ovirt,pid,pipeline_metadata,rsyslog,service,systemd,tags,testcase,tlog,viaq_msg_id
keep_empty_fields 'message'
rename_time true
pipeline_type 'collector'
process_kubernetes_events false
name warn
match 'Warning|WARN|^W[0-9]+|level=warn|Value:warn|"level":"warn"'
name info
match 'Info|INFO|^I[0-9]+|level=info|Value:info|"level":"info"'
name error
match 'Error|ERROR|^E[0-9]+|level=error|Value:error|"level":"error"'
name critical
match 'Critical|CRITICAL|^C[0-9]+|level=critical|Value:critical|"level":"critical"'
name debug
match 'Debug|DEBUG|^D[0-9]+|level=debug|Value:debug|"level":"debug"'
tag "journal.system**"
type sys_journal
remove_keys log,stream,MESSAGE,_SOURCE_REALTIME_TIMESTAMP,__REALTIME_TIMESTAMP,CONTAINER_ID,CONTAINER_ID_FULL,CONTAINER_NAME,PRIORITY,_BOOT_ID,_CAP_EFFECTIVE,_CMDLINE,_COMM,_EXE,_GID,_HOSTNAME,_MACHINE_ID,_PID,_SELINUX_CONTEXT,_SYSTEMD_CGROUP,_SYSTEMD_SLICE,_SYSTEMD_UNIT,_TRANSPORT,_UID,_AUDIT_LOGINUID,_AUDIT_SESSION,_SYSTEMD_OWNER_UID,_SYSTEMD_SESSION,_SYSTEMD_USER_UNIT,CODE_FILE,CODE_FUNCTION,CODE_LINE,ERRNO,MESSAGE_ID,RESULT,UNIT,_KERNEL_DEVICE,_KERNEL_SUBSYSTEM,_UDEV_SYSNAME,_UDEV_DEVNODE,_UDEV_DEVLINK,SYSLOG_FACILITY,SYSLOG_IDENTIFIER,SYSLOG_PID
tag "kubernetes.var.log.pods.**_eventrouter-** k8s-audit.log** openshift-audit.log** ovn-audit.log**"
type k8s_json_file
remove_keys stream
process_kubernetes_events 'true'
tag "kubernetes.var.log.pods**"
type k8s_json_file
remove_keys stream
# Generate elasticsearch id
@type elasticsearch_genid_ext
hash_id_key viaq_msg_id
alt_key kubernetes.event.metadata.uid
alt_tags 'kubernetes.var.log.pods.**_eventrouter-*.** kubernetes.journal.container._default_.kubernetes-event'
# Include Infrastructure logs
@type relabel
@label @_INFRASTRUCTURE
# Include Application logs
@type relabel
@label @_APPLICATION
# Include Audit logs
@type relabel
@label @_AUDIT
# Send any remaining unmatched tags to stdout
@type stdout
# Sending application source type to pipeline
@type record_modifier
log_type application
@type relabel
@label @FORWARD_APP
# Sending infrastructure source type to pipeline
@type record_modifier
log_type infrastructure
@type relabel
@label @FORWARD_INFRA_AUDIT
# Sending audit source type to pipeline
@type record_modifier
log_type audit
@type relabel
@label @FORWARD_INFRA_AUDIT
# Copying pipeline forward-app to outputs
# Parse the logs into json
@type parser
key_name message
reserve_data yes
hash_value_field structured
@type json
json_parser oj
@type relabel
@label @DEFAULT
# Copying pipeline forward-infra-audit to outputs
@type relabel
@label @DEFAULT
# Ship logs to specific outputs
# Viaq Data Model
@type viaq_data_model
enable_openshift_model false
enable_prune_empty_fields false
rename_time false
undefined_dot_replace_char UNUSED
elasticsearch_index_prefix_field 'viaq_index_name'
enabled 'true'
tag "kubernetes.var.log.pods.openshift_** kubernetes.var.log.pods.openshift-*_** kubernetes.var.log.pods.default_** kubernetes.var.log.pods.kube-*_** var.log.pods.openshift_** var.log.pods.openshift-*_** var.log.pods.default_** var.log.pods.kube-*_** journal.system** system.var.log**"
name_type static
static_index_name infra-write
enabled 'true'
tag "linux-audit.log** k8s-audit.log** openshift-audit.log** ovn-audit.log**"
name_type static
static_index_name audit-write
enabled 'true'
tag "**"
name_type structured
static_index_name app-write
@type viaq_data_model
enable_prune_labels true
enable_openshift_model false
rename_time false
undefined_dot_replace_char UNUSED
prune_labels_exclusions app.kubernetes.io/name,app.kubernetes.io/instance,app.kubernetes.io/version,app.kubernetes.io/component,app.kubernetes.io/part-of,app.kubernetes.io/managed-by,app.kubernetes.io/created-by
#remove structured field if present
@type record_modifier
char_encoding utf-8:utf-8
remove_keys structured
@type elasticsearch
@id retry_default
host elasticsearch
port 9200
verify_es_version_at_startup false
scheme https
ssl_version TLSv1_2
client_key '/var/run/ocp-collector/secrets/collector/tls.key'
client_cert '/var/run/ocp-collector/secrets/collector/tls.crt'
ca_file '/var/run/ocp-collector/secrets/collector/ca-bundle.crt'
target_index_key viaq_index_name
id_key viaq_msg_id
remove_keys viaq_index_name
type_name _doc
http_backend typhoeus
write_operation create
# https://github.com/uken/fluent-plugin-elasticsearch#suppress_type_name
suppress_type_name 'true'
reload_connections 'true'
# https://github.com/uken/fluent-plugin-elasticsearch#reload-after
reload_after '200'
# https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name
sniffer_class_name 'Fluent::Plugin::ElasticsearchSimpleSniffer'
reload_on_failure false
# 2 ^ 31
request_timeout 2147483648
@type file
path '/var/lib/fluentd/retry_default'
flush_mode interval
flush_interval 1s
flush_thread_count 2
retry_type exponential_backoff
retry_wait 1s
retry_max_interval 60s
retry_timeout 60m
queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32'}"
total_limit_size "#{ENV['TOTAL_LIMIT_SIZE_PER_BUFFER'] || '8589934592'}"
chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '8m'}"
overflow_action block
disable_chunk_backup true
@type elasticsearch
@id default
host elasticsearch
port 9200
verify_es_version_at_startup false
scheme https
ssl_version TLSv1_2
client_key '/var/run/ocp-collector/secrets/collector/tls.key'
client_cert '/var/run/ocp-collector/secrets/collector/tls.crt'
ca_file '/var/run/ocp-collector/secrets/collector/ca-bundle.crt'
target_index_key viaq_index_name
id_key viaq_msg_id
remove_keys viaq_index_name
type_name _doc
retry_tag retry_default
http_backend typhoeus
write_operation create
# https://github.com/uken/fluent-plugin-elasticsearch#suppress_type_name
suppress_type_name 'true'
reload_connections 'true'
# https://github.com/uken/fluent-plugin-elasticsearch#reload-after
reload_after '200'
# https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name
sniffer_class_name 'Fluent::Plugin::ElasticsearchSimpleSniffer'
reload_on_failure false
# 2 ^ 31
request_timeout 2147483648
@type file
path '/var/lib/fluentd/default'
flush_mode interval
flush_interval 1s
flush_thread_count 2
retry_type exponential_backoff
retry_wait 1s
retry_max_interval 60s
retry_timeout 60m
queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32'}"
total_limit_size "#{ENV['TOTAL_LIMIT_SIZE_PER_BUFFER'] || '8589934592'}"
chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '8m'}"
overflow_action block
disable_chunk_backup true