{ "fluent.conf": "## CLO GENERATED CONFIGURATION ###\n# This file is a copy of the fluentd configuration entrypoint\n# which should normally be supplied in a configmap.\n\n<system>\n  log_level \"#{ENV['LOG_LEVEL'] || 'warn'}\"\n</system>\n\n# In each section below, pre- and post- includes don't include anything initially;\n# they exist to enable future additions to openshift conf as needed.\n\n## sources\n## ordered so that syslog always runs last...\n<source>\n  @type prometheus\n  bind \"#{ENV['POD_IP']}\"\n  <ssl>\n    enable true\n    certificate_path \"#{ENV['METRICS_CERT'] || '/etc/fluent/metrics/tls.crt'}\"\n    private_key_path \"#{ENV['METRICS_KEY'] || '/etc/fluent/metrics/tls.key'}\"\n  </ssl>\n</source>\n\n<source>\n  @type prometheus_monitor\n  <labels>\n    hostname ${hostname}\n  </labels>\n</source>\n\n# excluding prometheus_tail_monitor\n# since it leaks namespace/pod info\n# via file paths\n\n# This is considered experimental by the repo\n<source>\n  @type prometheus_output_monitor\n  <labels>\n    hostname ${hostname}\n  </labels>\n</source>\n\n#journal logs to gather node\n<source>\n  @type systemd\n  @id systemd-input\n  @label @MEASURE\n  path '/var/log/journal'\n  <storage>\n    @type local\n    persistent true\n    # NOTE: if this does not end in .json, fluentd will think it\n    # is the name of a directory - see fluentd storage_local.rb\n    path '/var/log/journal_pos.json'\n  </storage>\n  matches \"#{ENV['JOURNAL_FILTERS_JSON'] || '[]'}\"\n  tag journal\n  read_from_head \"#{if (val = ENV.fetch('JOURNAL_READ_FROM_HEAD','')) && (val.length > 0); val; else 'false'; end}\"\n</source>\n\n# container logs\n<source>\n  @type tail\n  @id container-input\n  path \"/var/log/containers/*.log\"\n  exclude_path [\"/var/log/containers/fluentd-*_openshift-logging_*.log\", \"/var/log/containers/elasticsearch-*_openshift-logging_*.log\", \"/var/log/containers/kibana-*_openshift-logging_*.log\"]\n  pos_file \"/var/log/es-containers.log.pos\"\n  refresh_interval 5\n  rotate_wait 5\n  tag kubernetes.*\n  read_from_head \"true\"\n  @label @MEASURE\n  <parse>\n    @type multi_format\n    <pattern>\n      format json\n      time_format '%Y-%m-%dT%H:%M:%S.%N%Z'\n      keep_time_key true\n    </pattern>\n    <pattern>\n      format regexp\n      expression /^(?<time>[^\s]+) (?<stream>stdout|stderr)( (?<logtag>.))? (?<log>.*)$/\n      time_format '%Y-%m-%dT%H:%M:%S.%N%:z'\n      keep_time_key true\n    </pattern>\n  </parse>\n</source>\n\n#syslog input config here\n\n# Relabel specific sources (e.g. logs.apps) to multiple pipelines\n\n# Relabel specific pipelines to multiple, outputs (e.g. ES, kafka stores)\n\n# Ship logs to specific outputs\n",
  "run.sh": "#!/bin/bash\n\nCFG_DIR=/etc/fluent/configs.d\n\nfluentdargs=\"--no-supervisor\"\n# find the sniffer class file\nsniffer=$( gem contents fluent-plugin-elasticsearch|grep elasticsearch_simple_sniffer.rb )\nif [ -z \"$sniffer\" ] ; then\n  sniffer=$( rpm -ql rubygem-fluent-plugin-elasticsearch|grep elasticsearch_simple_sniffer.rb )\nfi\nif [ -n \"$sniffer\" -a -f \"$sniffer\" ] ; then\n  fluentdargs=\"$fluentdargs -r $sniffer\"\nfi\n\nset -e\nfluentdargs=\"--suppress-config-dump $fluentdargs\"\n\nissue_deprecation_warnings() {\n  : # none at the moment\n}\n\nIPADDR4=${NODE_IPV4:-$( /usr/sbin/ip -4 addr show dev eth0 | grep inet | sed -e \"s/[ \t]*inet \([0-9.]*\).*/\1/\" )}\nIPADDR6=${NODE_IPV6:-$( /usr/sbin/ip -6 addr show dev eth0 | grep inet | sed -e \"s/[ \t]*inet6 \([a-z0-9::]*\).*/\1/\" | grep -v ^fe80 | grep -v ^::1 || echo \"\")}\n\nexport IPADDR4 IPADDR6\n\n# Check bearer_token_file for fluent-plugin-kubernetes_metadata_filter.\nif [ ! -s /var/run/secrets/kubernetes.io/serviceaccount/token ] ; then\n  echo \"ERROR: Bearer_token_file (/var/run/secrets/kubernetes.io/serviceaccount/token) to access the Kubernetes API server is missing or empty.\"\n  exit 1\nfi\n\n# If FILE_BUFFER_PATH exists and it is not a directory, mkdir fails with the error.\nFILE_BUFFER_PATH=/var/lib/fluentd\nmkdir -p $FILE_BUFFER_PATH\nFLUENT_CONF=$CFG_DIR/user/fluent.conf\nif [ ! -f \"$FLUENT_CONF\" ] ; then\n  echo \"ERROR: The configuration $FLUENT_CONF does not exist\"\n  exit 1\nfi\n\n###\n# Calculate the max allowed for each output buffer given the number of\n# buffer file paths\n###\n\nNUM_OUTPUTS=$(grep \"path.*'$FILE_BUFFER_PATH\" $FLUENT_CONF | wc -l)\nif [ $NUM_OUTPUTS -eq 0 ]; then\n  # Reset to default single output if log forwarding outputs all invalid\n  NUM_OUTPUTS=1\nfi\n\n# Get the available disk size.\nDF_LIMIT=$(df -B1 $FILE_BUFFER_PATH | grep -v Filesystem | awk '{print $2}')\nDF_LIMIT=${DF_LIMIT:-0}\nif [ $DF_LIMIT -eq 0 ]; then\n  echo \"ERROR: No disk space is available for file buffer in $FILE_BUFFER_PATH.\"\n  exit 1\nfi\n\n# Default to 15% of disk which is approximately 18G\nALLOWED_PERCENT_OF_DISK=${ALLOWED_PERCENT_OF_DISK:-15}\nif [ $ALLOWED_PERCENT_OF_DISK -gt 100 ] || [ $ALLOWED_PERCENT_OF_DISK -le 0 ] ; then\n  ALLOWED_PERCENT_OF_DISK=15\n  echo ALLOWED_PERCENT_OF_DISK is out of the allowed range. Setting to ${ALLOWED_PERCENT_OF_DISK}%\nfi\n# Determine allowed total given the number of outputs we have.\nALLOWED_DF_LIMIT=$(expr $DF_LIMIT \* $ALLOWED_PERCENT_OF_DISK / 100) || :\n\n# TOTAL_LIMIT_SIZE per buffer\nTOTAL_LIMIT_SIZE=$(expr $ALLOWED_DF_LIMIT / $NUM_OUTPUTS) || :\necho \"Setting each total_size_limit for $NUM_OUTPUTS buffers to $TOTAL_LIMIT_SIZE bytes\"\nexport TOTAL_LIMIT_SIZE\n\n##\n# Calculate the max number of queued chunks given the size of each chunk\n# and the max allowed space per buffer\n##\nBUFFER_SIZE_LIMIT=$(echo ${BUFFER_SIZE_LIMIT:-8388608})\nBUFFER_QUEUE_LIMIT=$(expr $TOTAL_LIMIT_SIZE / $BUFFER_SIZE_LIMIT)\necho \"Setting queued_chunks_limit_size for each buffer to $BUFFER_QUEUE_LIMIT\"\nexport BUFFER_QUEUE_LIMIT\necho \"Setting chunk_limit_size for each buffer to $BUFFER_SIZE_LIMIT\"\nexport BUFFER_SIZE_LIMIT\n\nissue_deprecation_warnings\n\n# this should be the last thing before launching fluentd so as not to use\n# jemalloc with any other processes\nif type -p jemalloc-config > /dev/null 2>&1 ; then\n  export LD_PRELOAD=$( jemalloc-config --libdir )/libjemalloc.so.$( jemalloc-config --revision )\n  export LD_BIND_NOW=1 # workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1544815\nfi\nif [ -f /var/log/openshift-apiserver/audit.log.pos ] ; then\n  #https://bugzilla.redhat.com/show_bug.cgi?id=1867687\n  mv /var/log/openshift-apiserver/audit.log.pos /var/log/oauth-apiserver.audit.log\nfi\n\nexec fluentd $fluentdargs\n" }
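As a sanity check on the buffer sizing that run.sh performs, the sketch below replays the same arithmetic with hypothetical inputs: a 120 GiB filesystem backing /var/lib/fluentd, the default 15% allowance, a single output, and the default 8 MiB chunk size. The numbers are illustrative only, not taken from any real node.

  # Worked example of the buffer math in run.sh (hypothetical values).
  DF_LIMIT=128849018880          # df -B1 size of the file buffer filesystem: 120 GiB
  ALLOWED_PERCENT_OF_DISK=15     # default share of the disk allowed for buffers
  NUM_OUTPUTS=1                  # one file buffer path found in fluent.conf
  BUFFER_SIZE_LIMIT=8388608      # default chunk size: 8 MiB

  ALLOWED_DF_LIMIT=$(expr $DF_LIMIT \* $ALLOWED_PERCENT_OF_DISK / 100)   # 19327352832 (~18 GiB)
  TOTAL_LIMIT_SIZE=$(expr $ALLOWED_DF_LIMIT / $NUM_OUTPUTS)              # 19327352832 per output
  BUFFER_QUEUE_LIMIT=$(expr $TOTAL_LIMIT_SIZE / $BUFFER_SIZE_LIMIT)      # 2304 queued chunks
  echo "total_limit_size=$TOTAL_LIMIT_SIZE chunk_limit_size=$BUFFER_SIZE_LIMIT queued_chunks_limit_size=$BUFFER_QUEUE_LIMIT"

With these inputs each output buffer is capped at roughly 18 GiB on disk, which matches the "approximately 18G" comment in the script, and Fluentd may queue up to 2304 chunks of 8 MiB each per output.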