
LOG-2052: [vector] Infra logs aren't collected correctly


    • Logging (Core) - Sprint 213

      Description of problem:

      Container logs from `openshift*` projects are sent to the app* indices, and journal logs are not collected at all:

      $ oc exec elasticsearch-cdm-k31v727y-1-7c8d98bdb6-gcbdq -- es_util --query=app*/_search?pretty |grep pod_namespace
      Defaulted container "elasticsearch" out of: elasticsearch, proxy
                  "pod_namespace" : "openshift-operator-lifecycle-manager",
                  "pod_namespace" : "openshift-operator-lifecycle-manager",
                  "pod_namespace" : "openshift-operator-lifecycle-manager",
                  "pod_namespace" : "openshift-cluster-version",
                  "pod_namespace" : "openshift-cluster-version",
                  "pod_namespace" : "openshift-kube-controller-manager",
                  "pod_namespace" : "openshift-kube-controller-manager",
                  "pod_namespace" : "openshift-kube-controller-manager",
                  "pod_namespace" : "openshift-kube-controller-manager",
                  "pod_namespace" : "openshift-kube-controller-manager",
      $ oc exec elasticsearch-cdm-k31v727y-1-7c8d98bdb6-gcbdq -- es_util --query=infra*/_search?pretty  -d '
      {
        "query": {
        "exists": {
                  "field": "systemd"
              }              
        }                                          
      }'
      Defaulted container "elasticsearch" out of: elasticsearch, proxy
      {
        "took" : 4,
        "timed_out" : false,
        "_shards" : {
          "total" : 3,
          "successful" : 3,
          "skipped" : 0,
          "failed" : 0
        },
        "hits" : {
          "total" : 0,
          "max_score" : null,
          "hits" : [ ]
        }
      }
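
      For comparison, a query like the following (a sketch only — it assumes the namespace is indexed as `kubernetes.pod_namespace`, matching the field referenced in the vector.toml below) can be used to check whether any `openshift*` container logs reached the infra* indices at all:

      # sketch: assumes the namespace field is indexed as kubernetes.pod_namespace
      $ oc exec elasticsearch-cdm-k31v727y-1-7c8d98bdb6-gcbdq -- es_util --query=infra*/_search?pretty -d '
      {
        "query": {
          "prefix": {
            "kubernetes.pod_namespace": "openshift"
          }
        }
      }'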

       

      vector.toml:

      # Logs from containers (including openshift containers)
      [sources.raw_container_logs]
      type = "kubernetes_logs"
      auto_partial_merge = true
      exclude_paths_glob_patterns = ["/var/log/pods/openshift-logging_collector-*/*/*.log", "/var/log/pods/openshift-logging_elasticsearch-*/*/*.log", "/var/log/pods/openshift-logging_kibana-*/*/*.log"]
      
      
      [sources.raw_journal_logs]
      type = "journald"
      
      
      [transforms.container_logs]
      type = "remap"
      inputs = ["raw_container_logs"]
      source = '''
        level = "unknown"
        if match(.message,r'(Warning|WARN|W[0-9]+|level=warn|Value:warn|"level":"warn")'){
      	level = "warn"
        } else if match(.message, r'Info|INFO|I[0-9]+|level=info|Value:info|"level":"info"'){
      	level = "info"
        } else if match(.message, r'Error|ERROR|E[0-9]+|level=error|Value:error|"level":"error"'){
      	level = "error"
        } else if match(.message, r'Debug|DEBUG|D[0-9]+|level=debug|Value:debug|"level":"debug"'){
      	level = "debug"
        }
        .level = level
      
      
        .pipeline_metadata.collector.name = "vector"
        .pipeline_metadata.collector.version = "0.14.1"
        ip4, err = get_env_var("NODE_IPV4")
        .pipeline_metadata.collector.ipaddr4 = ip4
        received, err = format_timestamp(now(),"%+")
        .pipeline_metadata.collector.received_at = received
        .pipeline_metadata.collector.error = err
       '''
      
      
      [transforms.journal_logs]
      type = "remap"
      inputs = ["raw_journal_logs"]
      source = '''
        level = "unknown"
        if match(.message,r'(Warning|WARN|W[0-9]+|level=warn|Value:warn|"level":"warn")'){
      	level = "warn"
        } else if match(.message, r'Info|INFO|I[0-9]+|level=info|Value:info|"level":"info"'){
      	level = "info"
        } else if match(.message, r'Error|ERROR|E[0-9]+|level=error|Value:error|"level":"error"'){
      	level = "error"
        } else if match(.message, r'Debug|DEBUG|D[0-9]+|level=debug|Value:debug|"level":"debug"'){
      	level = "debug"
        }
        .level = level
      
      
        .pipeline_metadata.collector.name = "vector"
        .pipeline_metadata.collector.version = "0.14.1"
        ip4, err = get_env_var("NODE_IPV4")
        .pipeline_metadata.collector.ipaddr4 = ip4
        received, err = format_timestamp(now(),"%+")
        .pipeline_metadata.collector.received_at = received
        .pipeline_metadata.collector.error = err
       '''
      
      
      
      
      [transforms.route_container_logs]
      type = "route"
      inputs = ["container_logs"]
      route.app = '!(starts_with!(.kubernetes.pod_namespace,"kube") && starts_with!(.kubernetes.pod_namespace,"openshift") && .kubernetes.pod_namespace == "default")'
      route.infra = 'starts_with!(.kubernetes.pod_namespace,"kube") || starts_with!(.kubernetes.pod_namespace,"openshift") || .kubernetes.pod_namespace == "default"'
      
      
      
      
      # Rename log stream to "application"
      [transforms.application]
      type = "remap"
      inputs = ["route_container_logs.app"]
      source = """
      .log_type = "app"
      """
      
      
      
      
      # Rename log stream to "infrastructure"
      [transforms.infrastructure]
      type = "remap"
      inputs = ["route_container_logs.infra","journal_logs"]
      source = """
      .log_type = "infra"
      """
      
      
      
      
      [transforms.pipeline_0_]
      type = "remap"
      inputs = ["application","infrastructure"]
      source = """
      .
      """
      
      
      
      
      # Adding _id field
      [transforms.elasticsearch_preprocess]
      type = "remap"
      inputs = ["pipeline_0_"]
      source = """
      ._id = encode_base64(uuid_v4())
      """
      
      
      [sinks.default]
      type = "elasticsearch"
      inputs = ["elasticsearch_preprocess"]
      endpoint = "https://elasticsearch.openshift-logging.svc:9200"
      index = "{{ log_type }}-write"
      request.timeout_secs = 2147483648
      bulk_action = "create"
      id_key = "_id"
      # TLS Config
      [sinks.default.tls]
      key_file = "/var/run/ocp-collector/secrets/collector/tls.key"
      crt_file = "/var/run/ocp-collector/secrets/collector/tls.crt"
      ca_file = "/var/run/ocp-collector/secrets/collector/ca-bundle.crt" 

      In the above config file, I found:

      route.app = '!(starts_with!(.kubernetes.pod_namespace,"kube") && starts_with!(.kubernetes.pod_namespace,"openshift") && .kubernetes.pod_namespace == "default")' 

      I changed it to:

      route.app = '!(starts_with!(.kubernetes.pod_namespace,"kube") || starts_with!(.kubernetes.pod_namespace,"openshift") || .kubernetes.pod_namespace == "default")' 

      After this change, the container logs from `openshift*` projects were no longer sent to the app* indices.
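
      For reference, applying De Morgan's law to the intended classification gives the following routing conditions (a sketch of what should be generated; the real fix belongs in the operator's config generator, not in the rendered vector.toml):

      [transforms.route_container_logs]
      type = "route"
      inputs = ["container_logs"]
      # "app" is everything that is NOT an infra namespace, i.e. the negation of the
      # whole infra condition: !(A || B || C), not !(A && B && C).  (sketch)
      route.app = '!(starts_with!(.kubernetes.pod_namespace,"kube") || starts_with!(.kubernetes.pod_namespace,"openshift") || .kubernetes.pod_namespace == "default")'
      route.infra = 'starts_with!(.kubernetes.pod_namespace,"kube") || starts_with!(.kubernetes.pod_namespace,"openshift") || .kubernetes.pod_namespace == "default"'

      With the original `&&` form, `!(A && B && C)` is true whenever at least one of the three tests fails, and an `openshift-*` namespace always fails the `== "default"` test, so those logs still matched the app route.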

       

      Version-Release number of selected component (if applicable):

      quay.io/openshift-logging/cluster-logging-operator@sha256:97997b3d465e6c78cc6c19921ced479281c60e96cbc2efdd1c7312bbb5722649

      How reproducible:

      Always

      Steps to Reproduce:
      1. Deploy ClusterLogging with `vector` as the collector
      2. Check the logs in Elasticsearch (see the index listing sketch below)
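
      For step 2, a quick way to see where the logs ended up is to list the indices and their document counts from the Elasticsearch pod (same command pattern as above; the pod name will differ per cluster):

      # pod name is cluster-specific
      $ oc exec elasticsearch-cdm-k31v727y-1-7c8d98bdb6-gcbdq -- es_util --query=_cat/indices?v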

      Actual results:

      Container logs from `openshift*` projects are indexed under app* indices, and journald logs are not indexed anywhere.

      Expected results:

      Container logs from `openshift*` (and other infrastructure) projects are indexed under infra* indices, and journald logs are collected into infra* indices.

      Additional info:
