When processor.metrics.tls == DISABLED:

$ oc get servicemonitor/flowlogs-pipeline-monitor -o yaml -n netobserv
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  creationTimestamp: "2023-06-12T14:30:32Z"
  generation: 1
  labels:
    app: flowlogs-pipeline
    version: d9bd118352c5cc267c3ad6ba62dc29393a1df9a8ac98dd2f9e9d920f5bca063
  name: flowlogs-pipeline-monitor
  namespace: netobserv
  ownerReferences:
  - apiVersion: flows.netobserv.io/v1beta1
    blockOwnerDeletion: true
    controller: true
    kind: FlowCollector
    name: cluster
    uid: 1d160a6f-d480-4ecb-b145-49b3c8172b3f
  resourceVersion: "35555"
  uid: b21b6e43-b700-4625-b23e-95c86a327731
spec:
  endpoints:
  - bearerTokenSecret:
      key: ""
    interval: 15s
    port: prometheus
    scheme: http
  namespaceSelector:
    matchNames:
    - netobserv
  selector:
    matchLabels:
      app: flowlogs-pipeline

When processor.metrics.tls == AUTO:

$ oc get servicemonitor/flowlogs-pipeline-monitor -o yaml -n netobserv
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  creationTimestamp: "2023-06-12T14:30:32Z"
  generation: 2
  labels:
    app: flowlogs-pipeline
    version: d9bd118352c5cc267c3ad6ba62dc29393a1df9a8ac98dd2f9e9d920f5bca063
  name: flowlogs-pipeline-monitor
  namespace: netobserv
  ownerReferences:
  - apiVersion: flows.netobserv.io/v1beta1
    blockOwnerDeletion: true
    controller: true
    kind: FlowCollector
    name: cluster
    uid: 1d160a6f-d480-4ecb-b145-49b3c8172b3f
  resourceVersion: "41813"
  uid: b21b6e43-b700-4625-b23e-95c86a327731
spec:
  endpoints:
  - bearerTokenSecret:
      key: ""
    interval: 15s
    port: prometheus
    scheme: https
    tlsConfig:
      ca: {}
      caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
      cert: {}
      serverName: flowlogs-pipeline-prom.netobserv.svc
  namespaceSelector:
    matchNames:
    - netobserv
  selector:
    matchLabels:
      app: flowlogs-pipeline

Verified that metrics are being scraped by Prometheus without issues.
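For context on how the mode is switched between the runs above and below: the TLS type is set on the FlowCollector custom resource (referred to as processor.metrics.tls here). The command below is an illustrative sketch only; the full field path (spec.processor.metrics.server.tls.type) is assumed from the v1beta1 API and should be confirmed with oc explain flowcollector.spec.processor.metrics on the cluster. Valid values exercised in these tests are DISABLED, AUTO and PROVIDED.

# Sketch only: spec path below is an assumption based on the v1beta1 FlowCollector API
$ oc patch flowcollector cluster --type=merge \
    -p '{"spec":{"processor":{"metrics":{"server":{"tls":{"type":"AUTO"}}}}}}'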
When processor.metrics.tls == PROVIDED:

$ oc get servicemonitor/flowlogs-pipeline-monitor -o yaml -n netobserv
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  creationTimestamp: "2023-06-12T14:30:32Z"
  generation: 3
  labels:
    app: flowlogs-pipeline
    version: d9bd118352c5cc267c3ad6ba62dc29393a1df9a8ac98dd2f9e9d920f5bca063
  name: flowlogs-pipeline-monitor
  namespace: netobserv
  ownerReferences:
  - apiVersion: flows.netobserv.io/v1beta1
    blockOwnerDeletion: true
    controller: true
    kind: FlowCollector
    name: cluster
    uid: 1d160a6f-d480-4ecb-b145-49b3c8172b3f
  resourceVersion: "51421"
  uid: b21b6e43-b700-4625-b23e-95c86a327731
spec:
  endpoints:
  - bearerTokenSecret:
      key: ""
    interval: 15s
    port: prometheus
    scheme: https
    tlsConfig:
      ca: {}
      cert: {}
      insecureSkipVerify: true
      serverName: flowlogs-pipeline-prom.netobserv.svc
  namespaceSelector:
    matchNames:
    - netobserv
  selector:
    matchLabels:
      app: flowlogs-pipeline

FLP logs an error for the metrics endpoint:

$ oc logs pod/flowlogs-pipeline-dw2dm
Starting flowlogs-pipeline:
=====
Build Version:
Build Date:

Using configuration:
{
  "PipeLine": "[{\"name\":\"grpc\"},{\"follows\":\"grpc\",\"name\":\"enrich\"},{\"follows\":\"enrich\",\"name\":\"loki\"},{\"follows\":\"enrich\",\"name\":\"prometheus\"}]",
  "Parameters": "[{\"ingest\":{\"grpc\":{\"port\":2055},\"type\":\"grpc\"},\"name\":\"grpc\"},{\"name\":\"enrich\",\"transform\":{\"network\":{\"directionInfo\":{\"dstHostField\":\"DstK8S_HostIP\",\"flowDirectionField\":\"FlowDirection\",\"ifDirectionField\":\"IfDirection\",\"reporterIPField\":\"AgentIP\",\"srcHostField\":\"SrcK8S_HostIP\"},\"rules\":[{\"input\":\"SrcAddr\",\"output\":\"SrcK8S\",\"type\":\"add_kubernetes\"},{\"input\":\"DstAddr\",\"output\":\"DstK8S\",\"type\":\"add_kubernetes\"},{\"type\":\"reinterpret_direction\"}]},\"type\":\"network\"}},{\"name\":\"loki\",\"write\":{\"loki\":{\"batchSize\":10485760,\"batchWait\":\"1s\",\"clientConfig\":{\"follow_redirects\":false,\"proxy_url\":null,\"tls_config\":{\"insecure_skip_verify\":false}},\"labels\":[\"SrcK8S_Namespace\",\"SrcK8S_OwnerName\",\"DstK8S_Namespace\",\"DstK8S_OwnerName\",\"FlowDirection\"],\"maxBackoff\":\"5s\",\"maxRetries\":2,\"minBackoff\":\"1s\",\"staticLabels\":{\"app\":\"netobserv-flowcollector\"},\"tenantID\":\"netobserv\",\"timeout\":\"10s\",\"timestampLabel\":\"TimeFlowEndMs\",\"timestampScale\":\"1ms\",\"url\":\"http://loki.netobserv.svc:3100/\"},\"type\":\"loki\"}},{\"encode\":{\"prom\":{\"expiryTime\":\"0s\",\"metrics\":[{\"buckets\":null,\"filter\":{\"key\":\"\",\"value\":\"\"},\"labels\":[\"SrcK8S_Namespace\",\"DstK8S_Namespace\"],\"name\":\"namespace_flows_total\",\"type\":\"counter\",\"valueKey\":\"\"},{\"buckets\":null,\"filter\":{\"key\":\"FlowDirection\",\"value\":\"0\"},\"labels\":[\"SrcK8S_HostName\",\"DstK8S_HostName\"],\"name\":\"node_ingress_bytes_total\",\"type\":\"counter\",\"valueKey\":\"Bytes\"},{\"buckets\":null,\"filter\":{\"key\":\"FlowDirection\",\"value\":\"0\"},\"labels\":[\"SrcK8S_Namespace\",\"DstK8S_Namespace\",\"SrcK8S_OwnerName\",\"DstK8S_OwnerName\",\"SrcK8S_OwnerType\",\"DstK8S_OwnerType\"],\"name\":\"workload_ingress_bytes_total\",\"type\":\"counter\",\"valueKey\":\"Bytes\"}],\"prefix\":\"netobserv_\"},\"type\":\"prom\"},\"name\":\"prometheus\"}]",
  "MetricsSettings": "{\"nopanic\":true,\"port\":9102,\"prefix\":\"netobserv_\",\"tls\":{\"certpath\":\"/var/prom-certs/service-ca.crt\",\"keypath\":\"/var/prom-certs/\"}}",
  "Health": {
    "Address": "0.0.0.0",
    "Port": "8080"
  },
  "Profile": {
    "Port": 6060
  }
}
time=2023-06-12T15:09:50Z level=info msg=startServer: addr = :9102
time=2023-06-12T15:09:50Z level=error msg=error in http.ListenAndServe: read /var/prom-certs/: is a directory
time=2023-06-12T15:09:51Z level=info msg=connecting stages: grpc --> enrich
time=2023-06-12T15:09:51Z level=info msg=connecting stages: enrich --> loki
time=2023-06-12T15:09:51Z level=info msg=connecting stages: enrich --> prometheus
time=2023-06-12T15:09:51Z level=info msg=starting PProf HTTP listener port=6060
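The failure matches the MetricsSettings rendered above: keypath is set to the mount directory /var/prom-certs/ rather than a key file, so the TLS listener cannot read a private key and the metrics server never starts. For comparison, a sketch of the shape the rendered settings would presumably need is shown below; the file names are hypothetical placeholders, not values taken from the cluster.

# Sketch only, hypothetical file names; the actual fix is tracked in NETOBSERV-1087
MetricsSettings:
  nopanic: true
  port: 9102
  prefix: netobserv_
  tls:
    certpath: /var/prom-certs/tls.crt  # a certificate file
    keypath: /var/prom-certs/tls.key   # a key file, not the mount directory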
Documented as a known issue and created a follow-up bug: https://issues.redhat.com/browse/NETOBSERV-1087

Verified with KAFKA mode:

$ oc get servicemonitor/flowlogs-pipeline-transformer-monitor -o yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  creationTimestamp: "2023-06-12T17:04:53Z"
  generation: 2
  labels:
    app: flowlogs-pipeline-transformer
    version: d9bd118352c5cc267c3ad6ba62dc29393a1df9a8ac98dd2f9e9d920f5bca063
  name: flowlogs-pipeline-transformer-monitor
  namespace: netobserv
  ownerReferences:
  - apiVersion: flows.netobserv.io/v1beta1
    blockOwnerDeletion: true
    controller: true
    kind: FlowCollector
    name: cluster
    uid: 1d160a6f-d480-4ecb-b145-49b3c8172b3f
  resourceVersion: "99913"
  uid: 181688ca-7b88-4ca9-8756-acf5c8240534
spec:
  endpoints:
  - bearerTokenSecret:
      key: ""
    interval: 15s
    port: prometheus
    scheme: https
    tlsConfig:
      ca: {}
      caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
      cert: {}
      serverName: flowlogs-pipeline-transformer-prom.netobserv.svc
  namespaceSelector:
    matchNames:
    - netobserv
  selector:
    matchLabels:
      app: flowlogs-pipeline-transformer

When processor.metrics.tls == DISABLED w/ KAFKA:

$ oc get servicemonitor/flowlogs-pipeline-transformer-monitor -o yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  creationTimestamp: "2023-06-12T17:04:53Z"
  generation: 3
  labels:
    app: flowlogs-pipeline-transformer
    version: d9bd118352c5cc267c3ad6ba62dc29393a1df9a8ac98dd2f9e9d920f5bca063
  name: flowlogs-pipeline-transformer-monitor
  namespace: netobserv
  ownerReferences:
  - apiVersion: flows.netobserv.io/v1beta1
    blockOwnerDeletion: true
    controller: true
    kind: FlowCollector
    name: cluster
    uid: 1d160a6f-d480-4ecb-b145-49b3c8172b3f
  resourceVersion: "101207"
  uid: 181688ca-7b88-4ca9-8756-acf5c8240534
spec:
  endpoints:
  - bearerTokenSecret:
      key: ""
    interval: 15s
    port: prometheus
    scheme: http
  namespaceSelector:
    matchNames:
    - netobserv
  selector:
    matchLabels:
      app: flowlogs-pipeline-transformer

In both of the above KAFKA cases, metrics are scraped fine.
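As a quick spot-check that the scraped metrics actually reach the cluster monitoring stack, one of the metrics generated by the prom encode stage above (for example netobserv_namespace_flows_total) can be queried through the thanos-querier route. This is an illustrative sketch rather than part of the recorded test output; it assumes the standard OpenShift monitoring route and the Prometheus HTTP query API.

# Illustrative spot-check, not part of the recorded output above
$ TOKEN=$(oc whoami -t)
$ HOST=$(oc get route thanos-querier -n openshift-monitoring -o jsonpath='{.spec.host}')
$ curl -sk -H "Authorization: Bearer $TOKEN" "https://$HOST/api/v1/query" \
    --data-urlencode 'query=sum(netobserv_namespace_flows_total)'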