## CLO GENERATED CONFIGURATION ###
# This file is a copy of the fluentd configuration entrypoint
# which should normally be supplied in a configmap.
<system>
  log_level "#{ENV['LOG_LEVEL'] || 'warn'}"
</system>
# Prometheus Monitoring
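# The prometheus source below serves a /metrics endpoint over TLS for
# Prometheus to scrape; the prometheus_monitor, collected_tail_monitor and
# prometheus_output_monitor sources that follow publish fluentd buffer,
# tail-throughput and output/retry metrics on that endpoint.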
<source>
  @type prometheus
  bind "#{ENV['PROM_BIND_IP']}"
  <transport tls>
    cert_path /etc/collector/metrics/tls.crt
    private_key_path /etc/collector/metrics/tls.key
  </transport>
</source>
<source>
  @type prometheus_monitor
  <labels>
    hostname ${hostname}
  </labels>
</source>
# excluding prometheus_tail_monitor
# since it leaks namespace/pod info
# via file paths
# tail_monitor plugin which publishes log_collected_bytes_total
<source>
  @type collected_tail_monitor
  <labels>
    hostname ${hostname}
  </labels>
</source>
# This is considered experimental by the repo
<source>
  @type prometheus_output_monitor
  <labels>
    hostname ${hostname}
  </labels>
</source>
# Logs from linux journal
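# Reads node-level journald entries (kubelet, CRI-O, kernel, systemd units).
# The local storage block keeps the journal cursor in journal_pos.json so a
# collector restart resumes where it left off; JOURNAL_READ_FROM_HEAD decides
# whether the full journal is replayed on the first start.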
<source>
  @type systemd
  @id systemd-input
  @label @INGRESS
  path '/var/log/journal'
  <storage>
    @type local
    persistent true
    # NOTE: if this does not end in .json, fluentd will think it
    # is the name of a directory - see fluentd storage_local.rb
    path '/var/lib/fluentd/pos/journal_pos.json'
  </storage>
  matches "#{ENV['JOURNAL_FILTERS_JSON'] || '[]'}"
  tag journal
  read_from_head "#{if (val = ENV.fetch('JOURNAL_READ_FROM_HEAD','')) && (val.length > 0); val; else 'false'; end}"
</source>
# Logs from containers (including openshift containers)
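# Tails CRI-O log files under /var/log/pods while excluding the logging
# stack's own pods (collector, elasticsearch, kibana, loki, gateway, opa) so
# the collector does not ingest its own output. The parse regexp matches the
# CRI-O line format "<timestamp> <stdout|stderr> <P|F> <message>", where P
# marks a partial line that the @CONCAT label reassembles before ingestion.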
<source>
  @type tail
  @id container-input
  path "/var/log/pods/*/*/*.log"
  exclude_path ["/var/log/pods/openshift-logging_collector-*/*/*.log", "/var/log/pods/openshift-logging_elasticsearch-*/*/*.log", "/var/log/pods/openshift-logging_kibana-*/*/*.log", "/var/log/pods/openshift-logging_*/loki*/*.log", "/var/log/pods/openshift-logging_*/gateway/*.log", "/var/log/pods/openshift-logging_*/opa/*.log", "/var/log/pods/*/*/*.gz", "/var/log/pods/*/*/*.tmp"]
  pos_file "/var/lib/fluentd/pos/es-containers.log.pos"
  follow_inodes true
  refresh_interval 5
  rotate_wait 5
  tag kubernetes.*
  read_from_head "true"
  skip_refresh_on_startup true
  @label @CONCAT
  <parse>
    @type regexp
    expression /^(?<@timestamp>[^\s]+) (?<stream>stdout|stderr) (?<logtag>[F|P]) (?<log>.*)$/
    time_key '@timestamp'
    keep_time_key true
  </parse>
</source>
# linux audit logs
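# Tails the node's auditd log; the viaq_host_audit parser is expected to
# normalize raw auditd records into the ViaQ audit data model.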
<source>
  @type tail
  @id audit-input
  @label @INGRESS
  path "/var/log/audit/audit.log"
  pos_file "/var/lib/fluentd/pos/audit.log.pos"
  follow_inodes true
  tag linux-audit.log
  <parse>
    @type viaq_host_audit
  </parse>
</source>
# k8s audit logs
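# kube-apiserver audit events are written as one JSON document per line,
# keyed on requestReceivedTimestamp for the event time.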
<source>
  @type tail
  @id k8s-audit-input
  @label @INGRESS
  path "/var/log/kube-apiserver/audit.log"
  pos_file "/var/lib/fluentd/pos/kube-apiserver.audit.log.pos"
  follow_inodes true
  tag k8s-audit.log
  <parse>
    @type json
    time_key requestReceivedTimestamp
    # In case folks want to parse based on the requestReceivedTimestamp key
    keep_time_key true
    time_format %Y-%m-%dT%H:%M:%S.%N%z
  </parse>
</source>
# Openshift audit logs
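# The oauth-apiserver, openshift-apiserver and oauth-server audit logs share
# one tail source (and one position file) and use the same JSON parsing as
# the kube-apiserver audit input above.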
<source>
  @type tail
  @id openshift-audit-input
  @label @INGRESS
  path /var/log/oauth-apiserver/audit.log,/var/log/openshift-apiserver/audit.log,/var/log/oauth-server/audit.log
  pos_file /var/lib/fluentd/pos/oauth-apiserver.audit.log
  follow_inodes true
  tag openshift-audit.log
  <parse>
    @type json
    time_key requestReceivedTimestamp
    # In case folks want to parse based on the requestReceivedTimestamp key
    keep_time_key true
    time_format %Y-%m-%dT%H:%M:%S.%N%z
  </parse>
</source>
# Openshift Virtual Network (OVN) audit logs
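# OVN ACL audit entries are plain text lines, so they are ingested unparsed
# (@type none) and carried forward as the raw message.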
<source>
  @type tail
  @id ovn-audit-input
  @label @INGRESS
  path "/var/log/ovn/acl-audit-log.log"
  pos_file "/var/lib/fluentd/pos/acl-audit-log.pos"
  follow_inodes true
  tag ovn-audit.log
  refresh_interval 5
  rotate_wait 5
  read_from_head true
  <parse>
    @type none
  </parse>
</source>
# Concat log lines of container logs, and send to INGRESS pipeline
# Ingress pipeline
# Sending application source type to pipeline
# Sending infrastructure source type to pipeline
# Sending audit source type to pipeline
# Copying pipeline all-logs-to-lokistack to outputs
# Copying pipeline all-logs-to-lokistack-1 to outputs
# Copying pipeline all-logs-to-lokistack-2 to outputs
# Ship logs to specific outputs
#dedot namespace_labels and rebuild message field if present
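# The record_modifier filter below uses embedded Ruby expressions for their
# side effects: each _dummy_ entry rewrites kubernetes label keys, replacing
# '.' and '/' with '_' so they are safe to use as Loki label names, and
# remove_keys then drops the placeholder fields from the record.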
<filter **>
  @type record_modifier
  <record>
    _dummy_ ${if m=record.dig("kubernetes","namespace_labels");record["kubernetes"]["namespace_labels"]={}.tap{|n|m.each{|k,v|n[k.gsub(/[.\/]/,'_')]=v}};end}
    _dummy2_ ${if m=record.dig("kubernetes","labels");record["kubernetes"]["labels"]={}.tap{|n|m.each{|k,v|n[k.gsub(/[.\/]/,'_')]=v}};end}
    _dummy3_ ${if m=record.dig("kubernetes","flat_labels");record["kubernetes"]["flat_labels"]=[].tap{|n|m.each_with_index{|s, i|n[i] = s.gsub(/[.\/]/,'_')}};end}
  </record>
  remove_keys _dummy_, _dummy2_, _dummy3_
</filter>
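# Copy selected kubernetes metadata and log_type into flat placeholder keys;
# the Loki output's <label> section below maps these to Loki stream labels.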
<filter **>
  @type record_modifier
  <record>
    _kubernetes_container_name ${record.dig("kubernetes","container_name")}
    _kubernetes_host "#{ENV['NODE_NAME']}"
    _kubernetes_namespace_name ${record.dig("kubernetes","namespace_name")}
    _kubernetes_pod_name ${record.dig("kubernetes","pod_name")}
    _log_type ${record.dig("log_type")}
  </record>
</filter>
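# Ship audit logs to the LokiStack gateway's audit tenant, authenticating
# with the service account token over the service CA. Records are sent as
# JSON lines; the file buffer retries with exponential backoff, and its
# queue, total and chunk sizes are tunable through the BUFFER_QUEUE_LIMIT,
# TOTAL_LIMIT_SIZE_PER_BUFFER and BUFFER_SIZE_LIMIT environment variables.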
<match **>
  @type loki
  @id default_loki_audit
  line_format json
  url https://lokistack-dev-gateway-http.openshift-logging.svc:8080/api/logs/v1/audit
  ca_cert /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
  bearer_token_file /var/run/secrets/kubernetes.io/serviceaccount/token
  <label>
    kubernetes_container_name _kubernetes_container_name
    kubernetes_host _kubernetes_host
    kubernetes_namespace_name _kubernetes_namespace_name
    kubernetes_pod_name _kubernetes_pod_name
    log_type _log_type
  </label>
  <buffer>
    @type file
    path '/var/lib/fluentd/default_loki_audit'
    flush_mode interval
    flush_interval 1s
    flush_thread_count 2
    retry_type exponential_backoff
    retry_wait 1s
    retry_max_interval 60s
    retry_timeout 60m
    queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32'}"
    total_limit_size "#{ENV['TOTAL_LIMIT_SIZE_PER_BUFFER'] || '8589934592'}"
    chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '8m'}"
    overflow_action block
    disable_chunk_backup true
  </buffer>
</match>
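# The same dedot, label-extraction and Loki output chain is repeated below
# for the infrastructure tenant (default_loki_infra).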
#dedot namespace_labels and rebuild message field if present
<filter **>
  @type record_modifier
  <record>
    _dummy_ ${if m=record.dig("kubernetes","namespace_labels");record["kubernetes"]["namespace_labels"]={}.tap{|n|m.each{|k,v|n[k.gsub(/[.\/]/,'_')]=v}};end}
    _dummy2_ ${if m=record.dig("kubernetes","labels");record["kubernetes"]["labels"]={}.tap{|n|m.each{|k,v|n[k.gsub(/[.\/]/,'_')]=v}};end}
    _dummy3_ ${if m=record.dig("kubernetes","flat_labels");record["kubernetes"]["flat_labels"]=[].tap{|n|m.each_with_index{|s, i|n[i] = s.gsub(/[.\/]/,'_')}};end}
  </record>
  remove_keys _dummy_, _dummy2_, _dummy3_
</filter>
<filter **>
  @type record_modifier
  <record>
    _kubernetes_container_name ${record.dig("kubernetes","container_name")}
    _kubernetes_host "#{ENV['NODE_NAME']}"
    _kubernetes_namespace_name ${record.dig("kubernetes","namespace_name")}
    _kubernetes_pod_name ${record.dig("kubernetes","pod_name")}
    _log_type ${record.dig("log_type")}
  </record>
</filter>
<match **>
  @type loki
  @id default_loki_infra
  line_format json
  url https://lokistack-dev-gateway-http.openshift-logging.svc:8080/api/logs/v1/infrastructure
  ca_cert /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
  bearer_token_file /var/run/secrets/kubernetes.io/serviceaccount/token
  <label>
    kubernetes_container_name _kubernetes_container_name
    kubernetes_host _kubernetes_host
    kubernetes_namespace_name _kubernetes_namespace_name
    kubernetes_pod_name _kubernetes_pod_name
    log_type _log_type
  </label>
  <buffer>
    @type file
    path '/var/lib/fluentd/default_loki_infra'
    flush_mode interval
    flush_interval 1s
    flush_thread_count 2
    retry_type exponential_backoff
    retry_wait 1s
    retry_max_interval 60s
    retry_timeout 60m
    queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32'}"
    total_limit_size "#{ENV['TOTAL_LIMIT_SIZE_PER_BUFFER'] || '8589934592'}"
    chunk_limit_size "#{ENV['BUFFER_SIZE_LIMIT'] || '8m'}"
    overflow_action block
    disable_chunk_backup true
  </buffer>
</match>